diff --git a/CREDITS b/CREDITS
index 61186c85d72..939da46a87f 100644
--- a/CREDITS
+++ b/CREDITS
@@ -464,6 +464,11 @@ S: 1200 Goldenrod Dr.
S: Nampa, Idaho 83686
S: USA
+N: Dirk J. Brandewie
+E: dirk.j.brandewie@intel.com
+E: linux-wimax@intel.com
+D: Intel Wireless WiMAX Connection 2400 SDIO driver
+
N: Derrick J. Brashear
E: shadow@dementia.org
W: http://www.dementia.org/~shadow
@@ -2119,6 +2124,11 @@ N: H.J. Lu
E: hjl@gnu.ai.mit.edu
D: GCC + libraries hacker
+N: Yanir Lubetkin
+E: yanirx.lubatkin@intel.com
+E: linux-wimax@intel.com
+D: Intel Wireless WiMAX Connection 2400 driver
+
N: Michal Ludvig
E: michal@logix.cz
E: michal.ludvig@asterisk.co.nz
@@ -2693,6 +2703,13 @@ S: RR #5, 497 Pole Line Road
S: Thunder Bay, Ontario
S: CANADA P7C 5M9
+N: Inaky Perez-Gonzalez
+E: inaky.perez-gonzalez@intel.com
+E: linux-wimax@intel.com
+E: inakypg@yahoo.com
+D: WiMAX stack
+D: Intel Wireless WiMAX Connection 2400 driver
+
N: Yuri Per
E: yuri@pts.mipt.ru
D: Some smbfs fixes
diff --git a/Documentation/DocBook/networking.tmpl b/Documentation/DocBook/networking.tmpl
index 627707a3cb9..59ad69a9d77 100644
--- a/Documentation/DocBook/networking.tmpl
+++ b/Documentation/DocBook/networking.tmpl
@@ -74,6 +74,14 @@
!Enet/sunrpc/rpcb_clnt.c
!Enet/sunrpc/clnt.c
+ WiMAX
+!Enet/wimax/op-msg.c
+!Enet/wimax/op-reset.c
+!Enet/wimax/op-rfkill.c
+!Enet/wimax/stack.c
+!Iinclude/net/wimax.h
+!Iinclude/linux/wimax.h
+
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 0b3f6711d2f..a58fc8b7339 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -91,6 +91,7 @@ parameter is applicable:
SUSPEND System suspend states are enabled.
FTRACE Function tracing enabled.
TS Appropriate touchscreen support is enabled.
+ UMS USB Mass Storage support is enabled.
USB USB support is enabled.
USBHID USB Human Interface Device support is enabled.
V4L Video For Linux support is enabled.
@@ -2383,6 +2384,41 @@ and is between 256 and 4096 characters. It is defined in the file
usbhid.mousepoll=
[USBHID] The interval which mice are to be polled at.
+ usb-storage.delay_use=
+ [UMS] The delay in seconds before a new device is
+ scanned for Logical Units (default 5).
+
+ usb-storage.quirks=
+ [UMS] A list of quirks entries to supplement or
+ override the built-in unusual_devs list. List
+ entries are separated by commas. Each entry has
+ the form VID:PID:Flags where VID and PID are Vendor
+ and Product ID values (4-digit hex numbers) and
+ Flags is a set of characters, each corresponding
+ to a common usb-storage quirk flag as follows:
+ a = SANE_SENSE (collect more than 18 bytes
+ of sense data);
+ c = FIX_CAPACITY (decrease the reported
+ device capacity by one sector);
+ h = CAPACITY_HEURISTICS (decrease the
+ reported device capacity by one
+ sector if the number is odd);
+ i = IGNORE_DEVICE (don't bind to this
+ device);
+ l = NOT_LOCKABLE (don't try to lock and
+ unlock ejectable media);
+ m = MAX_SECTORS_64 (don't transfer more
+ than 64 sectors = 32 KB at a time);
+ o = CAPACITY_OK (accept the capacity
+ reported by the device);
+ r = IGNORE_RESIDUE (the device reports
+ bogus residue values);
+ s = SINGLE_LUN (the device has only one
+ Logical Unit);
+ w = NO_WP_DETECT (don't test whether the
+ medium is write-protected).
+ Example: quirks=0419:aaf5:rl,0421:0433:rc
+
add_efi_memmap [EFI; x86-32,X86-64] Include EFI memory map in
kernel's map of available physical RAM.
diff --git a/Documentation/usb/power-management.txt b/Documentation/usb/power-management.txt
index e48ea1d5101..ad642615ad4 100644
--- a/Documentation/usb/power-management.txt
+++ b/Documentation/usb/power-management.txt
@@ -313,11 +313,13 @@ three of the methods listed above. In addition, a driver indicates
that it supports autosuspend by setting the .supports_autosuspend flag
in its usb_driver structure. It is then responsible for informing the
USB core whenever one of its interfaces becomes busy or idle. The
-driver does so by calling these three functions:
+driver does so by calling these five functions:
int usb_autopm_get_interface(struct usb_interface *intf);
void usb_autopm_put_interface(struct usb_interface *intf);
int usb_autopm_set_interface(struct usb_interface *intf);
+ int usb_autopm_get_interface_async(struct usb_interface *intf);
+ void usb_autopm_put_interface_async(struct usb_interface *intf);
The functions work by maintaining a counter in the usb_interface
structure. When intf->pm_usage_count is > 0 then the interface is
@@ -330,10 +332,12 @@ associated with the device itself rather than any of its interfaces.
This field is used only by the USB core.)
The driver owns intf->pm_usage_count; it can modify the value however
-and whenever it likes. A nice aspect of the usb_autopm_* routines is
-that the changes they make are protected by the usb_device structure's
-PM mutex (udev->pm_mutex); however drivers may change pm_usage_count
-without holding the mutex.
+and whenever it likes. A nice aspect of the non-async usb_autopm_*
+routines is that the changes they make are protected by the usb_device
+structure's PM mutex (udev->pm_mutex); however drivers may change
+pm_usage_count without holding the mutex. Drivers using the async
+routines are responsible for their own synchronization and mutual
+exclusion.
usb_autopm_get_interface() increments pm_usage_count and
attempts an autoresume if the new value is > 0 and the
@@ -348,6 +352,14 @@ without holding the mutex.
is suspended, and it attempts an autosuspend if the value is
<= 0 and the device isn't suspended.
+ usb_autopm_get_interface_async() and
+ usb_autopm_put_interface_async() do almost the same things as
+ their non-async counterparts. The differences are: they do
+ not acquire the PM mutex, and they use a workqueue to do their
+ jobs. As a result they can be called in an atomic context,
+ such as an URB's completion handler, but when they return the
+ device will generally not yet be in the desired state.
+
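+A short, hypothetical sketch (the function name my_write_complete() is
+made up for illustration): the put side of the async API is typically
+called from an URB completion handler, which runs in atomic context:
+
+	static void my_write_complete(struct urb *urb)
+	{
+		struct usb_interface *intf = urb->context;
+
+		/* Mark the interface idle; the actual autosuspend, if any,
+		 * is carried out later by the USB core, not here. */
+		usb_autopm_put_interface_async(intf);
+	}
+
+The matching usb_autopm_get_interface_async() would have been called
+when the I/O was started; since its resume is also deferred, the driver
+cannot assume the device is already awake when that call returns.
+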
There also are a couple of utility routines drivers can use:
usb_autopm_enable() sets pm_usage_cnt to 0 and then calls
diff --git a/Documentation/wimax/README.i2400m b/Documentation/wimax/README.i2400m
new file mode 100644
index 00000000000..7dffd8919cb
--- /dev/null
+++ b/Documentation/wimax/README.i2400m
@@ -0,0 +1,260 @@
+
+ Driver for the Intel Wireless WiMAX Connection 2400m
+
+ (C) 2008 Intel Corporation < linux-wimax@intel.com >
+
+ This provides a driver for the Intel Wireless WiMAX Connection 2400m
+ and a basic Linux kernel WiMAX stack.
+
+1. Requirements
+
+ * Linux installation with Linux kernel 2.6.22 or newer (if building
+ from a separate tree)
+ * Intel i2400m Echo Peak or Baxter Peak; this includes the Intel
+ Wireless WiMAX/WiFi Link 5x50 series.
+ * build tools:
+ + Linux kernel development package for the target kernel; to
+ build against your currently running kernel, you need to have
+ the kernel development package corresponding to the running
+ image installed (usually if your kernel is named
+ linux-VERSION, the development package is called
+ linux-dev-VERSION or linux-headers-VERSION).
+ + GNU C Compiler, make
+
+2. Compilation and installation
+
+2.1. Compilation of the drivers included in the kernel
+
+ Configure the kernel; to enable the WiMAX drivers select Drivers >
+ Networking Drivers > WiMAX device support. Enable all of them as
+ modules (easier).
+
+ If USB or SDIO support is not enabled in the kernel configuration, the
+ options to build the i2400m USB or SDIO drivers will not be shown.
+ Enable those subsystems and then go back to the WiMAX menu to enable
+ the drivers.
+
+ Compile and install your kernel as usual.
+
+2.2. Compilation of the drivers distributed as a standalone module
+
+ To compile
+
+$ cd source/directory
+$ make
+
+ Once built, you can load and unload the modules with the provided
+ load.sh script: 'load.sh' loads them and 'load.sh u' unloads them.
+
+ To install in the default kernel directories (and enable auto loading
+ when the device is plugged in):
+
+$ make install
+$ depmod -a
+
+ If your kernel development files are located in a non standard
+ directory or if you want to build for a kernel that is not the
+ currently running one, set KDIR to the right location:
+
+$ make KDIR=/path/to/kernel/dev/tree
+
+ For more information, please contact linux-wimax@intel.com.
+
+3. Installing the firmware
+
+ The firmware can be obtained from http://linuxwimax.org or might have
+ been supplied with your hardware.
+
+ It has to be installed in the target system:
+ *
+$ cp FIRMWAREFILE.sbcf /lib/firmware/i2400m-fw-BUSTYPE-1.3.sbcf
+
+ * NOTE: if your firmware came in an .rpm or .deb file, just install
+ it as normal, with the rpm (rpm -i FIRMWARE.rpm) or dpkg
+ (dpkg -i FIRMWARE.deb) commands. No further action is needed.
+ * BUSTYPE will be usb or sdio, depending on the hardware you have.
+ Each hardware type comes with its own firmware and will not work
+ with other types.
+
+4. Design
+
+ This package contains two major parts: a WiMAX kernel stack and a
+ driver for the Intel i2400m.
+
+ The WiMAX stack is designed to provide common WiMAX control
+ services to current and future WiMAX devices from any vendor; please
+ see README.wimax for details.
+
+ The i2400m kernel driver is broken up into two main parts: the
+ bus-generic driver and the bus-specific drivers. The bus-generic
+ driver forms the driver core and contains no knowledge of the actual
+ method used to connect to the device. The bus-specific drivers are
+ just the glue connecting the bus-generic driver to the device. Currently only
+ USB and SDIO are supported. See drivers/net/wimax/i2400m/i2400m.h for
+ more information.
+
+ The bus-generic driver is logically broken up into two parts: OS-glue
+ and hardware-glue. The OS-glue interfaces with Linux. The hardware-glue
+ interfaces with the device using an interface provided by the
+ bus-specific driver. The reason for this breakup is to be able to
+ easily reuse the hardware-glue to write drivers for other OSes; note
+ that the hardware-glue part is written as a native Linux driver; no
+ abstraction layers are used, so porting to another OS means replacing
+ the Linux kernel API calls with the target OS's equivalents.
+
+5. Usage
+
+ To load the driver, follow the instructions in the install section;
+ once the driver is loaded, plug in the device (unless it is permanently
+ plugged in). The driver will enumerate the device, upload the firmware
+ and output messages in the kernel log (dmesg, /var/log/messages or
+ /var/log/kern.log) such as:
+
+...
+i2400m_usb 5-4:1.0: firmware interface version 8.0.0
+i2400m_usb 5-4:1.0: WiMAX interface wmx0 (00:1d:e1:01:94:2c) ready
+
+ At this point the device is ready to work.
+
+ Current versions require the Intel WiMAX Network Service in userspace
+ to make things work. See the network service's README for instructions
+ on how to scan, connect and disconnect.
+
+5.1. Module parameters
+
+ Module parameters can be set at kernel or module load time, or at
+ runtime by echoing values into the module's sysfs parameter files:
+
+$ echo VALUE > /sys/module/MODULENAME/parameters/PARAMETERNAME
+
+ To make changes permanent, for example, for the i2400m module, you can
+ also create a file named /etc/modprobe.d/i2400m containing:
+
+options i2400m idle_mode_disabled=1
+
+ To find which parameters are supported by a module, run:
+
+$ modinfo path/to/module.ko
+
+ During kernel bootup (if the driver is linked into the kernel), specify
+ the following on the kernel command line:
+
+i2400m.PARAMETER=VALUE
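+
+ For example, to disable idle mode from boot (this is the
+ idle_mode_disabled parameter described in section 5.1.1 below):
+
+i2400m.idle_mode_disabled=1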
+
+5.1.1. i2400m: idle_mode_disabled
+
+ The i2400m module supports a parameter to disable idle mode. This
+ parameter, once set, will take effect only when the device is
+ reinitialized by the driver (e.g., following a reset or a reconnect).
+
+5.2. Debug operations: debugfs entries
+
+ The driver will register debugfs entries that allow the user to tweak
+ debug settings. There are three main container directories where
+ entries are placed, which correspond to the three blocks an i2400m WiMAX
+ driver has:
+ * /sys/kernel/debug/wimax:DEVNAME/ for the generic WiMAX stack
+ controls
+ * /sys/kernel/debug/wimax:DEVNAME/i2400m for the i2400m generic
+ driver controls
+ * /sys/kernel/debug/wimax:DEVNAME/i2400m-usb (or -sdio) for the
+ bus-specific i2400m-usb or i2400m-sdio controls.
+
+ Of course, if debugfs is mounted in a directory other than
+ /sys/kernel/debug, those paths will change.
+
+5.2.1. Increasing debug output
+
+ The files named *dl_* indicate knobs for controlling the debug output
+ of different submodules:
+ *
+# find /sys/kernel/debug/wimax\:wmx0 -name \*dl_\*
+/sys/kernel/debug/wimax:wmx0/i2400m-usb/dl_tx
+/sys/kernel/debug/wimax:wmx0/i2400m-usb/dl_rx
+/sys/kernel/debug/wimax:wmx0/i2400m-usb/dl_notif
+/sys/kernel/debug/wimax:wmx0/i2400m-usb/dl_fw
+/sys/kernel/debug/wimax:wmx0/i2400m-usb/dl_usb
+/sys/kernel/debug/wimax:wmx0/i2400m/dl_tx
+/sys/kernel/debug/wimax:wmx0/i2400m/dl_rx
+/sys/kernel/debug/wimax:wmx0/i2400m/dl_rfkill
+/sys/kernel/debug/wimax:wmx0/i2400m/dl_netdev
+/sys/kernel/debug/wimax:wmx0/i2400m/dl_fw
+/sys/kernel/debug/wimax:wmx0/i2400m/dl_debugfs
+/sys/kernel/debug/wimax:wmx0/i2400m/dl_driver
+/sys/kernel/debug/wimax:wmx0/i2400m/dl_control
+/sys/kernel/debug/wimax:wmx0/wimax_dl_stack
+/sys/kernel/debug/wimax:wmx0/wimax_dl_op_rfkill
+/sys/kernel/debug/wimax:wmx0/wimax_dl_op_reset
+/sys/kernel/debug/wimax:wmx0/wimax_dl_op_msg
+/sys/kernel/debug/wimax:wmx0/wimax_dl_id_table
+/sys/kernel/debug/wimax:wmx0/wimax_dl_debugfs
+
+ By reading the file you can obtain the current value of said debug
+ level; by writing to it, you can set it.
+
+ To increase the debug level of, for example, the i2400m's generic TX
+ engine, just write:
+
+$ echo 3 > /sys/kernel/debug/wimax:wmx0/i2400m/dl_tx
+
+ Increasing numbers yield increasing debug information; for details of
+ what is printed and the available levels, check the source. The code
+ uses 0 for disabled and increasing values up to 8.
+
+5.2.2. RX and TX statistics
+
+ The i2400m/rx_stats and i2400m/tx_stats provide statistics about the
+ data reception/delivery from the device:
+
+$ cat /sys/kernel/debug/wimax:wmx0/i2400m/rx_stats
+45 1 3 34 3104 48 480
+
+ The numbers reported are
+ * packets/RX-buffer: total, min, max
+ * RX-buffers: total RX buffers received, accumulated RX buffer size
+ in bytes, min size received, max size received
+
+ Thus, to find the average buffer size received, divide the accumulated
+ RX-buffer size by the total number of RX buffers.
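+
+ For the sample output above that is 3104 bytes / 34 buffers, an
+ average of roughly 91 bytes per RX buffer.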
+
+ To clear the statistics back to 0, write anything to the rx_stats file:
+
+$ echo 1 > /sys/kernel/debug/wimax:wmx0/i2400m/rx_stats
+
+ Likewise for TX.
+
+ Note that the packets this debug file refers to are not network
+ packets, but packets in the sense of the device-specific protocol for
+ communication with the host. See drivers/net/wimax/i2400m/tx.c.
+
+5.2.3. Tracing messages received from user space
+
+ To echo messages received from user space into the trace pipe that the
+ i2400m driver creates, set the debug file i2400m/trace_msg_from_user to
+ 1:
+ *
+$ echo 1 > /sys/kernel/debug/wimax:wmx0/i2400m/trace_msg_from_user
+
+5.2.4. Performing a device reset
+
+ By writing a 0, a 1 or a 2 to the file
+ /sys/kernel/debug/wimax:wmx0/reset, the driver performs a warm (without
+ disconnecting from the bus), cold (disconnecting from the bus) or bus
+ (bus specific) reset on the device.
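+
+ For example, assuming the values map in the order listed above (so 0
+ selects a warm reset):
+
+$ echo 0 > /sys/kernel/debug/wimax:wmx0/reset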
+
+5.2.5. Asking the device to enter power saving mode
+
+ By writing any value to the /sys/kernel/debug/wimax:wmx0 file, the
+ device will attempt to enter power saving mode.
+
+6. Troubleshooting
+
+6.1. Driver complains about 'i2400m-fw-usb-1.3.sbcf: request failed'
+
+ If upon connecting the device, the following is output in the kernel
+ log:
+
+i2400m_usb 5-4:1.0: fw i2400m-fw-usb-1.3.sbcf: request failed: -2
+
+ This means that the driver cannot locate the firmware file named
+ /lib/firmware/i2400m-fw-usb-1.3.sbcf. Check that the file is present in
+ the right location.
diff --git a/Documentation/wimax/README.wimax b/Documentation/wimax/README.wimax
new file mode 100644
index 00000000000..b78c4378084
--- /dev/null
+++ b/Documentation/wimax/README.wimax
@@ -0,0 +1,81 @@
+
+ Linux kernel WiMAX stack
+
+ (C) 2008 Intel Corporation < linux-wimax@intel.com >
+
+ This provides a basic Linux kernel WiMAX stack to provide a common
+ control API for WiMAX devices, usable from kernel and user space.
+
+1. Design
+
+ The WiMAX stack is designed to provide common WiMAX control
+ services to current and future WiMAX devices from any vendor.
+
+ Because there is currently only one driver and we don't yet know what
+ the common services will be, the APIs it provides are very minimal.
+ However, it is designed in such a way that it can easily be extended to
+ accommodate future requirements.
+
+ The stack works by embedding a struct wimax_dev in your device's
+ control structures. This provides a set of callbacks that the WiMAX
+ stack will call in order to implement control operations requested by
+ the user. The stack also provides API functions that the driver
+ calls to notify it about changes in the device's state.
+
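+ The following is a minimal, hypothetical sketch of that flow. The
+ driver structure and function names below are made up; the exact
+ callback set and function prototypes are the ones declared in the
+ WiMAX headers and should be taken from there.
+
+#include <net/wimax.h>
+#include <linux/netdevice.h>
+
+struct my_wimax_device {			/* hypothetical driver data */
+	struct wimax_dev wimax_dev;		/* embedded WiMAX control info */
+	struct net_device *net_dev;
+};
+
+/* Callback the WiMAX stack invokes when a device reset is requested. */
+static int my_op_reset(struct wimax_dev *wimax_dev)
+{
+	/* ask the hardware to reset (stub) */
+	return 0;
+}
+
+static int my_setup(struct my_wimax_device *my)
+{
+	int result;
+
+	wimax_dev_init(&my->wimax_dev);
+	my->wimax_dev.op_reset = my_op_reset;
+	/* ... set the other op_* callbacks here ... */
+	result = wimax_dev_add(&my->wimax_dev, my->net_dev);
+	if (result < 0)
+		return result;
+	/* Later, when the hardware reports it is operational: */
+	wimax_state_change(&my->wimax_dev, WIMAX_ST_READY);
+	return 0;
+}
+
+ From then on the driver keeps calling wimax_state_change() as the
+ device's state evolves, as the i2400m driver does from its report
+ hooks.
+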
+ The stack exports the API calls needed to control the device to user
+ space using generic netlink as a marshalling mechanism. You can access
+ them using your own code or use the wrappers provided for your
+ convenience in libwimax (in the wimax-tools package).
+
+ For detailed information on the stack, please see
+ include/linux/wimax.h.
+
+2. Usage
+
+ For usage in a driver (registration, API, etc) please refer to the
+ instructions in the header file include/linux/wimax.h.
+
+ When a device is registered with the WiMAX stack, a set of debugfs
+ files will appear in /sys/kernel/debug/wimax:wmxX that can be tweaked
+ for control.
+
+2.1. Obtaining debug information: debugfs entries
+
+ The WiMAX stack is compiled, by default, with debug messages that can
+ be used to diagnose issues. By default, said messages are disabled.
+
+ The drivers will register debugfs entries that allow the user to tweak
+ debug settings.
+
+ Each driver, when registering with the stack, will cause a debugfs
+ directory named wimax:DEVICENAME to be created; optionally, it might
+ create more subentries below it.
+
+2.1.1. Increasing debug output
+
+ The files named *dl_* indicate knobs for controlling the debug output
+ of different submodules of the WiMAX stack:
+ *
+# find /sys/kernel/debug/wimax\:wmx0 -name \*dl_\*
+/sys/kernel/debug/wimax:wmx0/wimax_dl_stack
+/sys/kernel/debug/wimax:wmx0/wimax_dl_op_rfkill
+/sys/kernel/debug/wimax:wmx0/wimax_dl_op_reset
+/sys/kernel/debug/wimax:wmx0/wimax_dl_op_msg
+/sys/kernel/debug/wimax:wmx0/wimax_dl_id_table
+/sys/kernel/debug/wimax:wmx0/wimax_dl_debugfs
+/sys/kernel/debug/wimax:wmx0/.... # other driver specific files
+
+ NOTE: Of course, if debugfs is mounted in a directory other than
+ /sys/kernel/debug, those paths will change.
+
+ By reading the file you can obtain the current value of said debug
+ level; by writing to it, you can set it.
+
+ To increase the debug level of, for example, the id-table submodule,
+ just write:
+
+$ echo 3 > /sys/kernel/debug/wimax:wmx0/wimax_dl_id_table
+
+ Increasing numbers yield increasing debug information; for details of
+ what is printed and the available levels, check the source. The code
+ uses 0 for disabled and increasing values up to 8.
diff --git a/MAINTAINERS b/MAINTAINERS
index 06f8ff938c7..ee3871e204e 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2305,6 +2305,14 @@ W: http://lists.sourceforge.net/mailman/listinfo/ipw2100-devel
W: http://ipw2200.sourceforge.net
S: Supported
+INTEL WIRELESS WIMAX CONNECTION 2400
+P: Inaky Perez-Gonzalez
+M: inaky.perez-gonzalez@intel.com
+M: linux-wimax@intel.com
+L: wimax@linuxwimax.org
+S: Supported
+W: http://linuxwimax.org
+
INTEL WIRELESS WIFI LINK (iwlwifi)
P: Zhu Yi
M: yi.zhu@intel.com
@@ -2982,6 +2990,7 @@ MUSB MULTIPOINT HIGH SPEED DUAL-ROLE CONTROLLER
P: Felipe Balbi
M: felipe.balbi@nokia.com
L: linux-usb@vger.kernel.org
+T: git gitorious.org:/musb/mainline.git
S: Maintained
MYRICOM MYRI-10G 10GbE DRIVER (MYRI10GE)
@@ -4733,6 +4742,14 @@ M: zaga@fly.cc.fer.hr
L: linux-scsi@vger.kernel.org
S: Maintained
+WIMAX STACK
+P: Inaky Perez-Gonzalez
+M: inaky.perez-gonzalez@intel.com
+M: linux-wimax@intel.com
+L: wimax@linuxwimax.org
+S: Supported
+W: http://linuxwimax.org
+
WIMEDIA LLC PROTOCOL (WLP) SUBSYSTEM
P: David Vrabel
M: david.vrabel@csr.com
diff --git a/arch/arm/plat-mxc/include/mach/usb.h b/arch/arm/plat-mxc/include/mach/usb.h
new file mode 100644
index 00000000000..2dacb3086f1
--- /dev/null
+++ b/arch/arm/plat-mxc/include/mach/usb.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2008 Darius Augulis
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ASM_ARCH_MXC_USB
+#define __ASM_ARCH_MXC_USB
+
+struct imxusb_platform_data {
+ int (*init)(struct device *);
+ int (*exit)(struct device *);
+};
+
+#endif /* __ASM_ARCH_MXC_USB */
diff --git a/arch/arm/plat-omap/usb.c b/arch/arm/plat-omap/usb.c
index 67ca1e216df..add0485703b 100644
--- a/arch/arm/plat-omap/usb.c
+++ b/arch/arm/plat-omap/usb.c
@@ -77,38 +77,6 @@
/*-------------------------------------------------------------------------*/
-#if defined(CONFIG_ARCH_OMAP_OTG) || defined(CONFIG_USB_MUSB_OTG)
-
-static struct otg_transceiver *xceiv;
-
-/**
- * otg_get_transceiver - find the (single) OTG transceiver driver
- *
- * Returns the transceiver driver, after getting a refcount to it; or
- * null if there is no such transceiver. The caller is responsible for
- * releasing that count.
- */
-struct otg_transceiver *otg_get_transceiver(void)
-{
- if (xceiv)
- get_device(xceiv->dev);
- return xceiv;
-}
-EXPORT_SYMBOL(otg_get_transceiver);
-
-int otg_set_transceiver(struct otg_transceiver *x)
-{
- if (xceiv && x)
- return -EBUSY;
- xceiv = x;
- return 0;
-}
-EXPORT_SYMBOL(otg_set_transceiver);
-
-#endif
-
-/*-------------------------------------------------------------------------*/
-
#if defined(CONFIG_ARCH_OMAP_OTG) || defined(CONFIG_ARCH_OMAP15XX)
static void omap2_usb_devconf_clear(u8 port, u32 mask)
diff --git a/arch/powerpc/boot/dts/sequoia.dts b/arch/powerpc/boot/dts/sequoia.dts
index 3b295e8df53..43cc68bd319 100644
--- a/arch/powerpc/boot/dts/sequoia.dts
+++ b/arch/powerpc/boot/dts/sequoia.dts
@@ -134,7 +134,7 @@
};
USB1: usb@e0000400 {
- compatible = "ohci-be";
+ compatible = "ibm,usb-ohci-440epx", "ohci-be";
reg = <0x00000000 0xe0000400 0x00000060>;
interrupt-parent = <&UIC0>;
interrupts = <0x15 0x8>;
diff --git a/drivers/Makefile b/drivers/Makefile
index fceb71a741c..e121b66ef08 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -57,6 +57,7 @@ obj-$(CONFIG_ATA_OVER_ETH) += block/aoe/
obj-$(CONFIG_PARIDE) += block/paride/
obj-$(CONFIG_TC) += tc/
obj-$(CONFIG_UWB) += uwb/
+obj-$(CONFIG_USB_OTG_UTILS) += usb/otg/
obj-$(CONFIG_USB) += usb/
obj-$(CONFIG_USB_MUSB_HDRC) += usb/musb/
obj-$(CONFIG_PCI) += usb/
diff --git a/drivers/block/ub.c b/drivers/block/ub.c
index 048d71d244d..12fb816db7b 100644
--- a/drivers/block/ub.c
+++ b/drivers/block/ub.c
@@ -1579,7 +1579,7 @@ static void ub_reset_task(struct work_struct *work)
struct ub_dev *sc = container_of(work, struct ub_dev, reset_work);
unsigned long flags;
struct ub_lun *lun;
- int lkr, rc;
+ int rc;
if (!sc->reset) {
printk(KERN_WARNING "%s: Running reset unrequested\n",
@@ -1597,10 +1597,11 @@ static void ub_reset_task(struct work_struct *work)
} else if (sc->dev->actconfig->desc.bNumInterfaces != 1) {
;
} else {
- if ((lkr = usb_lock_device_for_reset(sc->dev, sc->intf)) < 0) {
+ rc = usb_lock_device_for_reset(sc->dev, sc->intf);
+ if (rc < 0) {
printk(KERN_NOTICE
"%s: usb_lock_device_for_reset failed (%d)\n",
- sc->name, lkr);
+ sc->name, rc);
} else {
rc = usb_reset_device(sc->dev);
if (rc < 0) {
@@ -1608,9 +1609,7 @@ static void ub_reset_task(struct work_struct *work)
"usb_lock_device_for_reset failed (%d)\n",
sc->name, rc);
}
-
- if (lkr)
- usb_unlock_device(sc->dev);
+ usb_unlock_device(sc->dev);
}
}
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index 03cb494af1c..f0a0f72238a 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -102,7 +102,7 @@ static void hid_reset(struct work_struct *work)
struct usbhid_device *usbhid =
container_of(work, struct usbhid_device, reset_work);
struct hid_device *hid = usbhid->hid;
- int rc_lock, rc = 0;
+ int rc = 0;
if (test_bit(HID_CLEAR_HALT, &usbhid->iofl)) {
dev_dbg(&usbhid->intf->dev, "clear halt\n");
@@ -113,11 +113,10 @@ static void hid_reset(struct work_struct *work)
else if (test_bit(HID_RESET_PENDING, &usbhid->iofl)) {
dev_dbg(&usbhid->intf->dev, "resetting device\n");
- rc = rc_lock = usb_lock_device_for_reset(hid_to_usb_dev(hid), usbhid->intf);
- if (rc_lock >= 0) {
+ rc = usb_lock_device_for_reset(hid_to_usb_dev(hid), usbhid->intf);
+ if (rc == 0) {
rc = usb_reset_device(hid_to_usb_dev(hid));
- if (rc_lock)
- usb_unlock_device(hid_to_usb_dev(hid));
+ usb_unlock_device(hid_to_usb_dev(hid));
}
clear_bit(HID_RESET_PENDING, &usbhid->iofl);
}
diff --git a/drivers/i2c/chips/Kconfig b/drivers/i2c/chips/Kconfig
index 864ac561fdb..59c3d23f5bd 100644
--- a/drivers/i2c/chips/Kconfig
+++ b/drivers/i2c/chips/Kconfig
@@ -114,18 +114,6 @@ config SENSORS_PCF8591
These devices are hard to detect and rarely found on mainstream
hardware. If unsure, say N.
-config ISP1301_OMAP
- tristate "Philips ISP1301 with OMAP OTG"
- depends on ARCH_OMAP_OTG
- help
- If you say yes here you get support for the Philips ISP1301
- USB-On-The-Go transceiver working with the OMAP OTG controller.
- The ISP1301 is used in products including H2 and H3 development
- boards for Texas Instruments OMAP processors.
-
- This driver can also be built as a module. If so, the module
- will be called isp1301_omap.
-
config SENSORS_MAX6875
tristate "Maxim MAX6875 Power supply supervisor"
depends on EXPERIMENTAL
diff --git a/drivers/i2c/chips/Makefile b/drivers/i2c/chips/Makefile
index 8b95f41a500..83accaaf816 100644
--- a/drivers/i2c/chips/Makefile
+++ b/drivers/i2c/chips/Makefile
@@ -18,7 +18,6 @@ obj-$(CONFIG_SENSORS_PCA9539) += pca9539.o
obj-$(CONFIG_SENSORS_PCF8574) += pcf8574.o
obj-$(CONFIG_PCF8575) += pcf8575.o
obj-$(CONFIG_SENSORS_PCF8591) += pcf8591.o
-obj-$(CONFIG_ISP1301_OMAP) += isp1301_omap.o
obj-$(CONFIG_SENSORS_TSL2550) += tsl2550.o
obj-$(CONFIG_MCU_MPC8349EMITX) += mcu_mpc8349emitx.o
diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw.c b/drivers/media/video/pvrusb2/pvrusb2-hdw.c
index 8fb92ac78c7..fa304e5f252 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-hdw.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-hdw.c
@@ -3655,7 +3655,7 @@ void pvr2_hdw_device_reset(struct pvr2_hdw *hdw)
int ret;
pvr2_trace(PVR2_TRACE_INIT,"Performing a device reset...");
ret = usb_lock_device_for_reset(hdw->usb_dev,NULL);
- if (ret == 1) {
+ if (ret == 0) {
ret = usb_reset_device(hdw->usb_dev);
usb_unlock_device(hdw->usb_dev);
} else {
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 9a18270c108..97ea7c60e00 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2614,6 +2614,8 @@ source "drivers/net/tokenring/Kconfig"
source "drivers/net/wireless/Kconfig"
+source "drivers/net/wimax/Kconfig"
+
source "drivers/net/usb/Kconfig"
source "drivers/net/pcmcia/Kconfig"
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index e5c34b46421..a3c5c002f22 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -263,3 +263,4 @@ obj-$(CONFIG_NIU) += niu.o
obj-$(CONFIG_VIRTIO_NET) += virtio_net.o
obj-$(CONFIG_SFC) += sfc/
+obj-$(CONFIG_WIMAX) += wimax/
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index 2ee034f70d1..3073ca25a0b 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -283,9 +283,9 @@ static int kaweth_control(struct kaweth_device *kaweth,
dr->bRequestType= requesttype;
dr->bRequest = request;
- dr->wValue = cpu_to_le16p(&value);
- dr->wIndex = cpu_to_le16p(&index);
- dr->wLength = cpu_to_le16p(&size);
+ dr->wValue = cpu_to_le16(value);
+ dr->wIndex = cpu_to_le16(index);
+ dr->wLength = cpu_to_le16(size);
return kaweth_internal_control_msg(kaweth->dev,
pipe,
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index 166880c113d..d9241f1c080 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -150,8 +150,8 @@ static int get_registers(pegasus_t * pegasus, __u16 indx, __u16 size,
pegasus->dr.bRequestType = PEGASUS_REQT_READ;
pegasus->dr.bRequest = PEGASUS_REQ_GET_REGS;
pegasus->dr.wValue = cpu_to_le16(0);
- pegasus->dr.wIndex = cpu_to_le16p(&indx);
- pegasus->dr.wLength = cpu_to_le16p(&size);
+ pegasus->dr.wIndex = cpu_to_le16(indx);
+ pegasus->dr.wLength = cpu_to_le16(size);
pegasus->ctrl_urb->transfer_buffer_length = size;
usb_fill_control_urb(pegasus->ctrl_urb, pegasus->usb,
@@ -208,8 +208,8 @@ static int set_registers(pegasus_t * pegasus, __u16 indx, __u16 size,
pegasus->dr.bRequestType = PEGASUS_REQT_WRITE;
pegasus->dr.bRequest = PEGASUS_REQ_SET_REGS;
pegasus->dr.wValue = cpu_to_le16(0);
- pegasus->dr.wIndex = cpu_to_le16p(&indx);
- pegasus->dr.wLength = cpu_to_le16p(&size);
+ pegasus->dr.wIndex = cpu_to_le16(indx);
+ pegasus->dr.wLength = cpu_to_le16(size);
pegasus->ctrl_urb->transfer_buffer_length = size;
usb_fill_control_urb(pegasus->ctrl_urb, pegasus->usb,
@@ -261,7 +261,7 @@ static int set_register(pegasus_t * pegasus, __u16 indx, __u8 data)
pegasus->dr.bRequestType = PEGASUS_REQT_WRITE;
pegasus->dr.bRequest = PEGASUS_REQ_SET_REG;
pegasus->dr.wValue = cpu_to_le16(data);
- pegasus->dr.wIndex = cpu_to_le16p(&indx);
+ pegasus->dr.wIndex = cpu_to_le16(indx);
pegasus->dr.wLength = cpu_to_le16(1);
pegasus->ctrl_urb->transfer_buffer_length = 1;
@@ -476,7 +476,7 @@ static inline void get_node_id(pegasus_t * pegasus, __u8 * id)
for (i = 0; i < 3; i++) {
read_eprom_word(pegasus, i, &w16);
- ((__le16 *) id)[i] = cpu_to_le16p(&w16);
+ ((__le16 *) id)[i] = cpu_to_le16(w16);
}
}
diff --git a/drivers/net/wimax/Kconfig b/drivers/net/wimax/Kconfig
new file mode 100644
index 00000000000..565018ec1e3
--- /dev/null
+++ b/drivers/net/wimax/Kconfig
@@ -0,0 +1,17 @@
+#
+# WiMAX LAN device drivers configuration
+#
+
+
+comment "Enable WiMAX (Networking options) to see the WiMAX drivers"
+ depends on WIMAX = n
+
+if WIMAX
+
+menu "WiMAX Wireless Broadband devices"
+
+source "drivers/net/wimax/i2400m/Kconfig"
+
+endmenu
+
+endif
diff --git a/drivers/net/wimax/Makefile b/drivers/net/wimax/Makefile
new file mode 100644
index 00000000000..992bc02bc01
--- /dev/null
+++ b/drivers/net/wimax/Makefile
@@ -0,0 +1,5 @@
+
+obj-$(CONFIG_WIMAX_I2400M) += i2400m/
+
+# (from Sam Ravnborg) force kbuild to create built-in.o
+obj- := dummy.o
diff --git a/drivers/net/wimax/i2400m/Kconfig b/drivers/net/wimax/i2400m/Kconfig
new file mode 100644
index 00000000000..d623b3d99a4
--- /dev/null
+++ b/drivers/net/wimax/i2400m/Kconfig
@@ -0,0 +1,49 @@
+
+config WIMAX_I2400M
+ tristate
+ depends on WIMAX
+ select FW_LOADER
+
+comment "Enable USB support to see WiMAX USB drivers"
+ depends on USB = n
+
+comment "Enable MMC support to see WiMAX SDIO drivers"
+ depends on MMC = n
+
+config WIMAX_I2400M_USB
+ tristate "Intel Wireless WiMAX Connection 2400 over USB (including 5x50)"
+ depends on WIMAX && USB
+ select WIMAX_I2400M
+ help
+ Select if you have a device based on the Intel WiMAX
+ Connection 2400 over USB (like any of the Intel Wireless
+ WiMAX/WiFi Link 5x50 series).
+
+ If unsure, it is safe to select M (module).
+
+config WIMAX_I2400M_SDIO
+ tristate "Intel Wireless WiMAX Connection 2400 over SDIO"
+ depends on WIMAX && MMC
+ select WIMAX_I2400M
+ help
+ Select if you have a device based on the Intel WiMAX
+ Connection 2400 over SDIO.
+
+ If unsure, it is safe to select M (module).
+
+config WIMAX_I2400M_DEBUG_LEVEL
+ int "WiMAX i2400m debug level"
+ depends on WIMAX_I2400M
+ default 8
+ help
+
+ Select the maximum debug verbosity level to be compiled into
+ the WiMAX i2400m driver code.
+
+ By default, this is disabled at runtime and can be
+ selectively enabled at runtime for different parts of the
+ code using the sysfs debug-levels file.
+
+ If set at zero, this will compile out all the debug code.
+
+ It is recommended that it is left at 8.
diff --git a/drivers/net/wimax/i2400m/Makefile b/drivers/net/wimax/i2400m/Makefile
new file mode 100644
index 00000000000..1696e936cf5
--- /dev/null
+++ b/drivers/net/wimax/i2400m/Makefile
@@ -0,0 +1,29 @@
+
+obj-$(CONFIG_WIMAX_I2400M) += i2400m.o
+obj-$(CONFIG_WIMAX_I2400M_USB) += i2400m-usb.o
+obj-$(CONFIG_WIMAX_I2400M_SDIO) += i2400m-sdio.o
+
+i2400m-y := \
+ control.o \
+ driver.o \
+ fw.o \
+ op-rfkill.o \
+ netdev.o \
+ tx.o \
+ rx.o
+
+i2400m-$(CONFIG_DEBUG_FS) += debugfs.o
+
+i2400m-usb-y := \
+ usb-fw.o \
+ usb-notif.o \
+ usb-tx.o \
+ usb-rx.o \
+ usb.o
+
+
+i2400m-sdio-y := \
+ sdio.o \
+ sdio-tx.o \
+ sdio-fw.o \
+ sdio-rx.o
diff --git a/drivers/net/wimax/i2400m/control.c b/drivers/net/wimax/i2400m/control.c
new file mode 100644
index 00000000000..d3d37fed689
--- /dev/null
+++ b/drivers/net/wimax/i2400m/control.c
@@ -0,0 +1,1291 @@
+/*
+ * Intel Wireless WiMAX Connection 2400m
+ * Miscellaneous control functions for managing the device
+ *
+ *
+ * Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Intel Corporation
+ * Inaky Perez-Gonzalez
+ * - Initial implementation
+ *
+ * This is a collection of functions used to control the device (plus
+ * a few helpers).
+ *
+ * There are utilities for handling TLV buffers, hooks on the device's
+ * reports to act on device changes of state [i2400m_report_hook()],
+ * on acks to commands [i2400m_msg_ack_hook()], a helper for sending
+ * commands to the device and blocking until a reply arrives
+ * [i2400m_msg_to_dev()], a few high level commands for manipulating
+ * the device state, power saving mode and configuration, plus the
+ * routines to set up the device once communication is established with
+ * it [i2400m_dev_initialize()].
+ *
+ * ROADMAP
+ *
+ * i2400m_dev_initialize() Called by i2400m_dev_start()
+ * i2400m_set_init_config()
+ * i2400m_firmware_check()
+ * i2400m_cmd_get_state()
+ * i2400m_dev_shutdown() Called by i2400m_dev_stop()
+ * i2400m->bus_reset()
+ *
+ * i2400m_{cmd,get,set}_*()
+ * i2400m_msg_to_dev()
+ * i2400m_msg_check_status()
+ *
+ * i2400m_report_hook() Called on reception of an event
+ * i2400m_report_state_hook()
+ * i2400m_tlv_buffer_walk()
+ * i2400m_tlv_match()
+ * i2400m_report_tlv_system_state()
+ * i2400m_report_tlv_rf_switches_status()
+ * i2400m_report_tlv_media_status()
+ * i2400m_cmd_enter_powersave()
+ *
+ * i2400m_msg_ack_hook() Called on reception of a reply to a
+ * command, get or set
+ */
+
+#include <stdarg.h>
+#include "i2400m.h"
+#include <linux/kernel.h>
+#include <linux/wimax/i2400m.h>
+
+
+#define D_SUBMODULE control
+#include "debug-levels.h"
+
+
+/*
+ * Return if a TLV is of a given type and size
+ *
+ * @tlv: pointer to the TLV
+ * @tlv_type: type of the TLV we are looking for
+ * @tlv_size: expected size of the TLV we are looking for (if -1,
+ * don't check the size). This includes the header
+ * Returns: 0 if the TLV matches
+ * < 0 if it doesn't match at all
+ * > 0 total TLV + payload size, if the type matches, but not
+ * the size
+ */
+static
+ssize_t i2400m_tlv_match(const struct i2400m_tlv_hdr *tlv,
+ enum i2400m_tlv tlv_type, ssize_t tlv_size)
+{
+ if (le16_to_cpu(tlv->type) != tlv_type) /* Not our type? skip */
+ return -1;
+ if (tlv_size != -1
+ && le16_to_cpu(tlv->length) + sizeof(*tlv) != tlv_size) {
+ size_t size = le16_to_cpu(tlv->length) + sizeof(*tlv);
+ printk(KERN_WARNING "W: tlv type 0x%x mismatched because of "
+ "size (got %zu vs %zu expected)\n",
+ tlv_type, size, tlv_size);
+ return size;
+ }
+ return 0;
+}
+
+
+/*
+ * Given a buffer of TLVs, iterate over them
+ *
+ * @i2400m: device instance
+ * @tlv_buf: pointer to the beginning of the TLV buffer
+ * @buf_size: buffer size in bytes
+ * @tlv_pos: seek position; this is assumed to be a pointer returned
+ * by i2400m_tlv_buffer_walk() [and thus, validated]. The
+ * TLV returned will be the one following this one.
+ *
+ * Usage:
+ *
+ * tlv_itr = NULL;
+ * while ((tlv_itr = i2400m_tlv_buffer_walk(i2400m, buf, size, tlv_itr))) {
+ * ...
+ * // Do stuff with tlv_itr, DON'T MODIFY IT
+ * ...
+ * }
+ */
+static
+const struct i2400m_tlv_hdr *i2400m_tlv_buffer_walk(
+ struct i2400m *i2400m,
+ const void *tlv_buf, size_t buf_size,
+ const struct i2400m_tlv_hdr *tlv_pos)
+{
+ struct device *dev = i2400m_dev(i2400m);
+ const struct i2400m_tlv_hdr *tlv_top = tlv_buf + buf_size;
+ size_t offset, length, avail_size;
+ unsigned type;
+
+ if (tlv_pos == NULL) /* Take the first one? */
+ tlv_pos = tlv_buf;
+ else /* Nope, the next one */
+ tlv_pos = (void *) tlv_pos
+ + le16_to_cpu(tlv_pos->length) + sizeof(*tlv_pos);
+ if (tlv_pos == tlv_top) { /* buffer done */
+ tlv_pos = NULL;
+ goto error_beyond_end;
+ }
+ if (tlv_pos > tlv_top) {
+ tlv_pos = NULL;
+ WARN_ON(1);
+ goto error_beyond_end;
+ }
+ offset = (void *) tlv_pos - (void *) tlv_buf;
+ avail_size = buf_size - offset;
+ if (avail_size < sizeof(*tlv_pos)) {
+ dev_err(dev, "HW BUG? tlv_buf %p [%zu bytes], tlv @%zu: "
+ "short header\n", tlv_buf, buf_size, offset);
+ goto error_short_header;
+ }
+ type = le16_to_cpu(tlv_pos->type);
+ length = le16_to_cpu(tlv_pos->length);
+ if (avail_size < sizeof(*tlv_pos) + length) {
+ dev_err(dev, "HW BUG? tlv_buf %p [%zu bytes], "
+ "tlv type 0x%04x @%zu: "
+ "short data (%zu bytes vs %zu needed)\n",
+ tlv_buf, buf_size, type, offset, avail_size,
+ sizeof(*tlv_pos) + length);
+ goto error_short_header;
+ }
+error_short_header:
+error_beyond_end:
+ return tlv_pos;
+}
+
+
+/*
+ * Find a TLV in a buffer of sequential TLVs
+ *
+ * @i2400m: device descriptor
+ * @tlv_hdr: pointer to the first TLV in the sequence
+ * @size: size of the buffer in bytes; all TLVs are assumed to fit
+ * fully in the buffer (otherwise we'll complain).
+ * @tlv_type: type of the TLV we are looking for
+ * @tlv_size: expected size of the TLV we are looking for (if -1,
+ * don't check the size). This includes the header
+ *
+ * Returns: NULL if the TLV is not found, otherwise a pointer to
+ * it. If the sizes don't match, an error is printed and NULL
+ * returned.
+ */
+static
+const struct i2400m_tlv_hdr *i2400m_tlv_find(
+ struct i2400m *i2400m,
+ const struct i2400m_tlv_hdr *tlv_hdr, size_t size,
+ enum i2400m_tlv tlv_type, ssize_t tlv_size)
+{
+ ssize_t match;
+ struct device *dev = i2400m_dev(i2400m);
+ const struct i2400m_tlv_hdr *tlv = NULL;
+ while ((tlv = i2400m_tlv_buffer_walk(i2400m, tlv_hdr, size, tlv))) {
+ match = i2400m_tlv_match(tlv, tlv_type, tlv_size);
+ if (match == 0) /* found it :) */
+ break;
+ if (match > 0)
+ dev_warn(dev, "TLV type 0x%04x found with size "
+ "mismatch (%zu vs %zu needed)\n",
+ tlv_type, match, tlv_size);
+ }
+ return tlv;
+}
+
+
+static const struct
+{
+ char *msg;
+ int errno;
+} ms_to_errno[I2400M_MS_MAX] = {
+ [I2400M_MS_DONE_OK] = { "", 0 },
+ [I2400M_MS_DONE_IN_PROGRESS] = { "", 0 },
+ [I2400M_MS_INVALID_OP] = { "invalid opcode", -ENOSYS },
+ [I2400M_MS_BAD_STATE] = { "invalid state", -EILSEQ },
+ [I2400M_MS_ILLEGAL_VALUE] = { "illegal value", -EINVAL },
+ [I2400M_MS_MISSING_PARAMS] = { "missing parameters", -ENOMSG },
+ [I2400M_MS_VERSION_ERROR] = { "bad version", -EIO },
+ [I2400M_MS_ACCESSIBILITY_ERROR] = { "accessibility error", -EIO },
+ [I2400M_MS_BUSY] = { "busy", -EBUSY },
+ [I2400M_MS_CORRUPTED_TLV] = { "corrupted TLV", -EILSEQ },
+ [I2400M_MS_UNINITIALIZED] = { "not initialized", -EILSEQ },
+ [I2400M_MS_UNKNOWN_ERROR] = { "unknown error", -EIO },
+ [I2400M_MS_PRODUCTION_ERROR] = { "production error", -EIO },
+ [I2400M_MS_NO_RF] = { "no RF", -EIO },
+ [I2400M_MS_NOT_READY_FOR_POWERSAVE] =
+ { "not ready for powersave", -EACCES },
+ [I2400M_MS_THERMAL_CRITICAL] = { "thermal critical", -EL3HLT },
+};
+
+
+/*
+ * i2400m_msg_check_status - translate a message's status code
+ *
+ * @l3l4_hdr: message header
+ * @strbuf: buffer to place a formatted error message (unless NULL).
+ * @strbuf_size: max amount of available space; larger messages will
+ * be truncated.
+ *
+ * Returns: errno code corresponding to the status code in @l3l4_hdr
+ * and a message in @strbuf describing the error.
+ */
+int i2400m_msg_check_status(const struct i2400m_l3l4_hdr *l3l4_hdr,
+ char *strbuf, size_t strbuf_size)
+{
+ int result;
+ enum i2400m_ms status = le16_to_cpu(l3l4_hdr->status);
+ const char *str;
+
+ if (status == 0)
+ return 0;
+ if (status >= ARRAY_SIZE(ms_to_errno)) {
+ str = "unknown status code";
+ result = -EBADR;
+ } else {
+ str = ms_to_errno[status].msg;
+ result = ms_to_errno[status].errno;
+ }
+ if (strbuf)
+ snprintf(strbuf, strbuf_size, "%s (%d)", str, status);
+ return result;
+}
+
+
+/*
+ * Act on a TLV System State reported by the device
+ *
+ * @i2400m: device descriptor
+ * @ss: validated System State TLV
+ */
+static
+void i2400m_report_tlv_system_state(struct i2400m *i2400m,
+ const struct i2400m_tlv_system_state *ss)
+{
+ struct device *dev = i2400m_dev(i2400m);
+ struct wimax_dev *wimax_dev = &i2400m->wimax_dev;
+ enum i2400m_system_state i2400m_state = le32_to_cpu(ss->state);
+
+ d_fnstart(3, dev, "(i2400m %p ss %p [%u])\n", i2400m, ss, i2400m_state);
+
+ if (unlikely(i2400m->ready == 0)) /* act if up */
+ goto out;
+ if (i2400m->state != i2400m_state) {
+ i2400m->state = i2400m_state;
+ wake_up_all(&i2400m->state_wq);
+ }
+ switch (i2400m_state) {
+ case I2400M_SS_UNINITIALIZED:
+ case I2400M_SS_INIT:
+ case I2400M_SS_CONFIG:
+ case I2400M_SS_PRODUCTION:
+ wimax_state_change(wimax_dev, WIMAX_ST_UNINITIALIZED);
+ break;
+
+ case I2400M_SS_RF_OFF:
+ case I2400M_SS_RF_SHUTDOWN:
+ wimax_state_change(wimax_dev, WIMAX_ST_RADIO_OFF);
+ break;
+
+ case I2400M_SS_READY:
+ case I2400M_SS_STANDBY:
+ case I2400M_SS_SLEEPACTIVE:
+ wimax_state_change(wimax_dev, WIMAX_ST_READY);
+ break;
+
+ case I2400M_SS_CONNECTING:
+ case I2400M_SS_WIMAX_CONNECTED:
+ wimax_state_change(wimax_dev, WIMAX_ST_READY);
+ break;
+
+ case I2400M_SS_SCAN:
+ case I2400M_SS_OUT_OF_ZONE:
+ wimax_state_change(wimax_dev, WIMAX_ST_SCANNING);
+ break;
+
+ case I2400M_SS_IDLE:
+ d_printf(1, dev, "entering BS-negotiated idle mode\n");
+ case I2400M_SS_DISCONNECTING:
+ case I2400M_SS_DATA_PATH_CONNECTED:
+ wimax_state_change(wimax_dev, WIMAX_ST_CONNECTED);
+ break;
+
+ default:
+ /* Huh? just in case, shut it down */
+ dev_err(dev, "HW BUG? unknown state %u: shutting down\n",
+ i2400m_state);
+ i2400m->bus_reset(i2400m, I2400M_RT_WARM);
+ break;
+ };
+out:
+ d_fnend(3, dev, "(i2400m %p ss %p [%u]) = void\n",
+ i2400m, ss, i2400m_state);
+}
+
+
+/*
+ * Parse and act on a TLV Media Status sent by the device
+ *
+ * @i2400m: device descriptor
+ * @ms: validated Media Status TLV
+ *
+ * This will set the carrier up or down based on the device's link
+ * report. This is done aside from what the WiMAX stack does based on
+ * the device's state, as sometimes we need to do a link renew (the BS
+ * wants us to renew a DHCP lease, for example).
+ *
+ * In fact, the documentation says that every time we get a link-up, we
+ * should do a DHCP negotiation...
+ */
+static
+void i2400m_report_tlv_media_status(struct i2400m *i2400m,
+ const struct i2400m_tlv_media_status *ms)
+{
+ struct device *dev = i2400m_dev(i2400m);
+ struct wimax_dev *wimax_dev = &i2400m->wimax_dev;
+ struct net_device *net_dev = wimax_dev->net_dev;
+ enum i2400m_media_status status = le32_to_cpu(ms->media_status);
+
+ d_fnstart(3, dev, "(i2400m %p ms %p [%u])\n", i2400m, ms, status);
+
+ if (unlikely(i2400m->ready == 0)) /* act if up */
+ goto out;
+ switch (status) {
+ case I2400M_MEDIA_STATUS_LINK_UP:
+ netif_carrier_on(net_dev);
+ break;
+ case I2400M_MEDIA_STATUS_LINK_DOWN:
+ netif_carrier_off(net_dev);
+ break;
+ /*
+ * This is the network telling us we need to retrain the DHCP
+ * lease -- so far, we are trusting the WiMAX Network Service
+ * in user space to pick this up and poke the DHCP client.
+ */
+ case I2400M_MEDIA_STATUS_LINK_RENEW:
+ netif_carrier_on(net_dev);
+ break;
+ default:
+ dev_err(dev, "HW BUG? unknown media status %u\n",
+ status);
+ };
+out:
+ d_fnend(3, dev, "(i2400m %p ms %p [%u]) = void\n",
+ i2400m, ms, status);
+}
+
+
+/*
+ * Parse a 'state report' and extract carrier on/off information
+ *
+ * @i2400m: device descriptor
+ * @l3l4_hdr: pointer to message; it has been already validated for
+ * consistent size.
+ * @size: size of the message (header + payload). The header length
+ * declaration is assumed to be congruent with @size (as in
+ * sizeof(*l3l4_hdr) + l3l4_hdr->length == size)
+ *
+ * Extract from the report state the system state TLV and infer from
+ * there if we have a carrier or not. Update our local state and tell
+ * netdev.
+ *
+ * When setting the carrier, it's fine to set OFF twice (for example),
+ * as netif_carrier_off() will not generate two OFF events (just on
+ * the transitions).
+ */
+static
+void i2400m_report_state_hook(struct i2400m *i2400m,
+ const struct i2400m_l3l4_hdr *l3l4_hdr,
+ size_t size, const char *tag)
+{
+ struct device *dev = i2400m_dev(i2400m);
+ const struct i2400m_tlv_hdr *tlv;
+ const struct i2400m_tlv_system_state *ss;
+ const struct i2400m_tlv_rf_switches_status *rfss;
+ const struct i2400m_tlv_media_status *ms;
+ size_t tlv_size = le16_to_cpu(l3l4_hdr->length);
+
+ d_fnstart(4, dev, "(i2400m %p, l3l4_hdr %p, size %zu, %s)\n",
+ i2400m, l3l4_hdr, size, tag);
+ tlv = NULL;
+
+ while ((tlv = i2400m_tlv_buffer_walk(i2400m, &l3l4_hdr->pl,
+ tlv_size, tlv))) {
+ if (0 == i2400m_tlv_match(tlv, I2400M_TLV_SYSTEM_STATE,
+ sizeof(*ss))) {
+ ss = container_of(tlv, typeof(*ss), hdr);
+ d_printf(2, dev, "%s: system state TLV "
+ "found (0x%04x), state 0x%08x\n",
+ tag, I2400M_TLV_SYSTEM_STATE,
+ le32_to_cpu(ss->state));
+ i2400m_report_tlv_system_state(i2400m, ss);
+ }
+ if (0 == i2400m_tlv_match(tlv, I2400M_TLV_RF_STATUS,
+ sizeof(*rfss))) {
+ rfss = container_of(tlv, typeof(*rfss), hdr);
+ d_printf(2, dev, "%s: RF status TLV "
+ "found (0x%04x), sw 0x%02x hw 0x%02x\n",
+ tag, I2400M_TLV_RF_STATUS,
+ le32_to_cpu(rfss->sw_rf_switch),
+ le32_to_cpu(rfss->hw_rf_switch));
+ i2400m_report_tlv_rf_switches_status(i2400m, rfss);
+ }
+ if (0 == i2400m_tlv_match(tlv, I2400M_TLV_MEDIA_STATUS,
+ sizeof(*ms))) {
+ ms = container_of(tlv, typeof(*ms), hdr);
+ d_printf(2, dev, "%s: Media Status TLV: %u\n",
+ tag, le32_to_cpu(ms->media_status));
+ i2400m_report_tlv_media_status(i2400m, ms);
+ }
+ }
+ d_fnend(4, dev, "(i2400m %p, l3l4_hdr %p, size %zu, %s) = void\n",
+ i2400m, l3l4_hdr, size, tag);
+}
+
+
+/*
+ * i2400m_report_hook - (maybe) act on a report
+ *
+ * @i2400m: device descriptor
+ * @l3l4_hdr: pointer to message; it has been already validated for
+ * consistent size.
+ * @size: size of the message (header + payload). The header length
+ * declaration is assumed to be congruent with @size (as in
+ * sizeof(*l3l4_hdr) + l3l4_hdr->length == size)
+ *
+ * Extract information we might need (like carrier on/off) from a
+ * device report.
+ */
+void i2400m_report_hook(struct i2400m *i2400m,
+ const struct i2400m_l3l4_hdr *l3l4_hdr, size_t size)
+{
+ struct device *dev = i2400m_dev(i2400m);
+ unsigned msg_type;
+
+ d_fnstart(3, dev, "(i2400m %p l3l4_hdr %p size %zu)\n",
+ i2400m, l3l4_hdr, size);
+ /* Chew on the message, we might need some information from
+ * here */
+ msg_type = le16_to_cpu(l3l4_hdr->type);
+ switch (msg_type) {
+ case I2400M_MT_REPORT_STATE: /* carrier detection... */
+ i2400m_report_state_hook(i2400m,
+ l3l4_hdr, size, "REPORT STATE");
+ break;
+ /* If the device is ready for power save, then ask it to do
+ * it. */
+ case I2400M_MT_REPORT_POWERSAVE_READY: /* zzzzz */
+ if (l3l4_hdr->status == cpu_to_le16(I2400M_MS_DONE_OK)) {
+ d_printf(1, dev, "ready for powersave, requesting\n");
+ i2400m_cmd_enter_powersave(i2400m);
+ }
+ break;
+ };
+ d_fnend(3, dev, "(i2400m %p l3l4_hdr %p size %zu) = void\n",
+ i2400m, l3l4_hdr, size);
+}
+
+
+/*
+ * i2400m_msg_ack_hook - process cmd/set/get ack for internal status
+ *
+ * @i2400m: device descriptor
+ * @l3l4_hdr: pointer to message; it has been already validated for
+ * consistent size.
+ * @size: size of the message
+ *
+ * Extract information we might need from acks to commands and act on
+ * it. This is akin to i2400m_report_hook(). Note most of this
+ * processing should be done in the function that calls the
+ * command. This is here for some cases where it can't happen...
+ */
+void i2400m_msg_ack_hook(struct i2400m *i2400m,
+ const struct i2400m_l3l4_hdr *l3l4_hdr, size_t size)
+{
+ int result;
+ struct device *dev = i2400m_dev(i2400m);
+ unsigned ack_type, ack_status;
+ char strerr[32];
+
+ /* Chew on the message, we might need some information from
+ * here */
+ ack_type = le16_to_cpu(l3l4_hdr->type);
+ ack_status = le16_to_cpu(l3l4_hdr->status);
+ switch (ack_type) {
+ case I2400M_MT_CMD_ENTER_POWERSAVE:
+ /* This is just left here for the sake of example, as
+ * the processing is done somewhere else. */
+ if (0) {
+ result = i2400m_msg_check_status(
+ l3l4_hdr, strerr, sizeof(strerr));
+ if (result >= 0)
+ d_printf(1, dev, "ready for power save: %zd\n",
+ size);
+ }
+ break;
+ };
+ return;
+}
+
+
+/*
+ * i2400m_msg_size_check() - verify message size and header are congruent
+ *
+ * It is ok if the total message size is larger than the expected
+ * size, as there can be padding.
+ */
+int i2400m_msg_size_check(struct i2400m *i2400m,
+ const struct i2400m_l3l4_hdr *l3l4_hdr,
+ size_t msg_size)
+{
+ int result;
+ struct device *dev = i2400m_dev(i2400m);
+ size_t expected_size;
+ d_fnstart(4, dev, "(i2400m %p l3l4_hdr %p msg_size %zu)\n",
+ i2400m, l3l4_hdr, msg_size);
+ if (msg_size < sizeof(*l3l4_hdr)) {
+ dev_err(dev, "bad size for message header "
+ "(expected at least %zu, got %zu)\n",
+ (size_t) sizeof(*l3l4_hdr), msg_size);
+ result = -EIO;
+ goto error_hdr_size;
+ }
+ expected_size = le16_to_cpu(l3l4_hdr->length) + sizeof(*l3l4_hdr);
+ if (msg_size < expected_size) {
+ dev_err(dev, "bad size for message code 0x%04x (expected %zu, "
+ "got %zu)\n", le16_to_cpu(l3l4_hdr->type),
+ expected_size, msg_size);
+ result = -EIO;
+ } else
+ result = 0;
+error_hdr_size:
+ d_fnend(4, dev,
+ "(i2400m %p l3l4_hdr %p msg_size %zu) = %d\n",
+ i2400m, l3l4_hdr, msg_size, result);
+ return result;
+}
+
+
+
+/*
+ * Cancel a wait for a command ACK
+ *
+ * @i2400m: device descriptor
+ * @code: [negative] errno code to cancel with (don't use
+ * -EINPROGRESS)
+ *
+ * If there is an ack already filled out, free it.
+ */
+void i2400m_msg_to_dev_cancel_wait(struct i2400m *i2400m, int code)
+{
+ struct sk_buff *ack_skb;
+ unsigned long flags;
+
+ spin_lock_irqsave(&i2400m->rx_lock, flags);
+ ack_skb = i2400m->ack_skb;
+ if (ack_skb && !IS_ERR(ack_skb))
+ kfree_skb(ack_skb);
+ i2400m->ack_skb = ERR_PTR(code);
+ spin_unlock_irqrestore(&i2400m->rx_lock, flags);
+}
+
+
+/**
+ * i2400m_msg_to_dev - Send a control message to the device and get a response
+ *
+ * @i2400m: device descriptor
+ *
+ * @buf: pointer to the buffer containing the message to be sent; it
+ * has to start with a &struct i2400m_l3l4_hdr and then
+ * followed by the payload. Once this function returns, the
+ * buffer can be reused.
+ *
+ * @buf_len: buffer size
+ *
+ * Returns:
+ *
+ * Pointer to skb containing the ack message. You need to check the
+ * pointer with IS_ERR(), as it might be an error code. Error codes
+ * could happen because:
+ *
+ * - the message wasn't formatted correctly
+ * - couldn't send the message
+ * - failed waiting for a response
+ * - the ack message wasn't formatted correctly
+ *
+ * The returned skb has been allocated with wimax_msg_to_user_alloc();
+ * it contains the response in a netlink attribute and is ready to be
+ * passed up to user space with wimax_msg_to_user_send(). To access
+ * the payload and its length, use wimax_msg_{data,len}() on the skb.
+ *
+ * The skb has to be freed with kfree_skb() once done.
+ *
+ * Description:
+ *
+ * This function delivers a message/command to the device and waits
+ * for an ack to be received. The format is described in
+ * linux/wimax/i2400m.h. In summary, a command/get/set is followed by an
+ * ack.
+ *
+ * This function will not check the ack status, that's left up to the
+ * caller. Once done with the ack skb, it has to be kfree_skb()ed.
+ *
+ * The i2400m handles only one message at a time, thus we need
+ * the mutex to exclude other players.
+ *
+ * We write the message and then wait for an answer to come back. The
+ * RX path intercepts control messages and handles them in
+ * i2400m_rx_ctl(). Reports (notifications) are (maybe) processed
+ * locally and then forwarded (as needed) to user space on the WiMAX
+ * stack message pipe. Acks are saved and passed back to us through an
+ * skb in i2400m->ack_skb which is ready to be given to generic
+ * netlink if need be.
+ */
+struct sk_buff *i2400m_msg_to_dev(struct i2400m *i2400m,
+ const void *buf, size_t buf_len)
+{
+ int result;
+ struct device *dev = i2400m_dev(i2400m);
+ const struct i2400m_l3l4_hdr *msg_l3l4_hdr;
+ struct sk_buff *ack_skb;
+ const struct i2400m_l3l4_hdr *ack_l3l4_hdr;
+ size_t ack_len;
+ int ack_timeout;
+ unsigned msg_type;
+ unsigned long flags;
+
+ d_fnstart(3, dev, "(i2400m %p buf %p len %zu)\n",
+ i2400m, buf, buf_len);
+
+ if (i2400m->boot_mode)
+ return ERR_PTR(-ENODEV);
+
+ msg_l3l4_hdr = buf;
+ /* Check msg & payload consistency */
+ result = i2400m_msg_size_check(i2400m, msg_l3l4_hdr, buf_len);
+ if (result < 0)
+ goto error_bad_msg;
+ msg_type = le16_to_cpu(msg_l3l4_hdr->type);
+ d_printf(1, dev, "CMD/GET/SET 0x%04x %zu bytes\n",
+ msg_type, buf_len);
+ d_dump(2, dev, buf, buf_len);
+
+ /* Setup the completion, ack_skb ("we are waiting") and send
+ * the message to the device */
+ mutex_lock(&i2400m->msg_mutex);
+ spin_lock_irqsave(&i2400m->rx_lock, flags);
+ i2400m->ack_skb = ERR_PTR(-EINPROGRESS);
+ spin_unlock_irqrestore(&i2400m->rx_lock, flags);
+ init_completion(&i2400m->msg_completion);
+ result = i2400m_tx(i2400m, buf, buf_len, I2400M_PT_CTRL);
+ if (result < 0) {
+ dev_err(dev, "can't send message 0x%04x: %d\n",
+ le16_to_cpu(msg_l3l4_hdr->type), result);
+ goto error_tx;
+ }
+
+ /* Some commands take longer to execute because of crypto ops,
+ * so we give them some more leeway on timeout */
+ switch (msg_type) {
+ case I2400M_MT_GET_TLS_OPERATION_RESULT:
+ case I2400M_MT_CMD_SEND_EAP_RESPONSE:
+ ack_timeout = 5 * HZ;
+ break;
+ default:
+ ack_timeout = HZ;
+ };
+
+ /* The RX path in rx.c will put any response for this message
+ * in i2400m->ack_skb and wake us up. If we cancel the wait,
+ * we need to change the value of i2400m->ack_skb to something
+ * not -EINPROGRESS so RX knows there is no one waiting. */
+ result = wait_for_completion_interruptible_timeout(
+ &i2400m->msg_completion, ack_timeout);
+ if (result == 0) {
+ dev_err(dev, "timeout waiting for reply to message 0x%04x\n",
+ msg_type);
+ result = -ETIMEDOUT;
+ i2400m_msg_to_dev_cancel_wait(i2400m, result);
+ goto error_wait_for_completion;
+ } else if (result < 0) {
+ dev_err(dev, "error waiting for reply to message 0x%04x: %d\n",
+ msg_type, result);
+ i2400m_msg_to_dev_cancel_wait(i2400m, result);
+ goto error_wait_for_completion;
+ }
+
+ /* Pull out the ack data from i2400m->ack_skb -- see if it is
+ * an error and act accordingly */
+ spin_lock_irqsave(&i2400m->rx_lock, flags);
+ ack_skb = i2400m->ack_skb;
+ if (IS_ERR(ack_skb))
+ result = PTR_ERR(ack_skb);
+ else
+ result = 0;
+ i2400m->ack_skb = NULL;
+ spin_unlock_irqrestore(&i2400m->rx_lock, flags);
+ if (result < 0)
+ goto error_ack_status;
+ ack_l3l4_hdr = wimax_msg_data_len(ack_skb, &ack_len);
+
+ /* Check the ack and deliver it if it is ok */
+ result = i2400m_msg_size_check(i2400m, ack_l3l4_hdr, ack_len);
+ if (result < 0) {
+ dev_err(dev, "HW BUG? reply to message 0x%04x: %d\n",
+ msg_type, result);
+ goto error_bad_ack_len;
+ }
+ if (msg_type != le16_to_cpu(ack_l3l4_hdr->type)) {
+ dev_err(dev, "HW BUG? bad reply 0x%04x to message 0x%04x\n",
+ le16_to_cpu(ack_l3l4_hdr->type), msg_type);
+ result = -EIO;
+ goto error_bad_ack_type;
+ }
+ i2400m_msg_ack_hook(i2400m, ack_l3l4_hdr, ack_len);
+ mutex_unlock(&i2400m->msg_mutex);
+ d_fnend(3, dev, "(i2400m %p buf %p len %zu) = %p\n",
+ i2400m, buf, buf_len, ack_skb);
+ return ack_skb;
+
+error_bad_ack_type:
+error_bad_ack_len:
+ kfree_skb(ack_skb);
+error_ack_status:
+error_wait_for_completion:
+error_tx:
+ mutex_unlock(&i2400m->msg_mutex);
+error_bad_msg:
+ d_fnend(3, dev, "(i2400m %p buf %p len %zu) = %d\n",
+ i2400m, buf, buf_len, result);
+ return ERR_PTR(result);
+}
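+
+/*
+ * Illustrative usage sketch for i2400m_msg_to_dev() -- not part of
+ * the driver, just a condensed view of the pattern the helpers below
+ * follow (they kzalloc() the command instead of using the stack):
+ *
+ *	struct i2400m_l3l4_hdr cmd = {
+ *		.type = cpu_to_le16(I2400M_MT_GET_STATE),
+ *		.length = 0,
+ *		.version = cpu_to_le16(I2400M_L3L4_VERSION),
+ *	};
+ *	struct sk_buff *ack_skb;
+ *	char strerr[32];
+ *	int result;
+ *
+ *	ack_skb = i2400m_msg_to_dev(i2400m, &cmd, sizeof(cmd));
+ *	if (IS_ERR(ack_skb))
+ *		return PTR_ERR(ack_skb);
+ *	result = i2400m_msg_check_status(wimax_msg_data(ack_skb),
+ *					 strerr, sizeof(strerr));
+ *	kfree_skb(ack_skb);
+ */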
+
+
+/*
+ * Definitions for the Enter Power Save command
+ *
+ * The Enter Power Save command requests the device to go into power
+ * saving mode. The device will ack or nak the command depending on
+ * whether it is ready for it. If it acks, we can then tell the USB
+ * subsystem to autosuspend the device.
+ *
+ * As well, the device might request to go into power saving mode by
+ * sending a report (REPORT_POWERSAVE_READY), in which case we issue
+ * this command. The hookups in the RX code make that happen.
+ */
+enum {
+ I2400M_WAKEUP_ENABLED = 0x01,
+ I2400M_WAKEUP_DISABLED = 0x02,
+ I2400M_TLV_TYPE_WAKEUP_MODE = 144,
+};
+
+struct i2400m_cmd_enter_power_save {
+ struct i2400m_l3l4_hdr hdr;
+ struct i2400m_tlv_hdr tlv;
+ __le32 val;
+} __attribute__((packed));
+
+
+/*
+ * Request entering power save
+ *
+ * This command is (mainly) executed when the device indicates that it
+ * is ready to go into powersave mode via a REPORT_POWERSAVE_READY.
+ */
+int i2400m_cmd_enter_powersave(struct i2400m *i2400m)
+{
+ int result;
+ struct device *dev = i2400m_dev(i2400m);
+ struct sk_buff *ack_skb;
+ struct i2400m_cmd_enter_power_save *cmd;
+ char strerr[32];
+
+ result = -ENOMEM;
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (cmd == NULL)
+ goto error_alloc;
+ cmd->hdr.type = cpu_to_le16(I2400M_MT_CMD_ENTER_POWERSAVE);
+ cmd->hdr.length = cpu_to_le16(sizeof(*cmd) - sizeof(cmd->hdr));
+ cmd->hdr.version = cpu_to_le16(I2400M_L3L4_VERSION);
+ cmd->tlv.type = cpu_to_le16(I2400M_TLV_TYPE_WAKEUP_MODE);
+ cmd->tlv.length = cpu_to_le16(sizeof(cmd->val));
+ cmd->val = cpu_to_le32(I2400M_WAKEUP_ENABLED);
+
+ ack_skb = i2400m_msg_to_dev(i2400m, cmd, sizeof(*cmd));
+ result = PTR_ERR(ack_skb);
+ if (IS_ERR(ack_skb)) {
+ dev_err(dev, "Failed to issue 'Enter power save' command: %d\n",
+ result);
+ goto error_msg_to_dev;
+ }
+ result = i2400m_msg_check_status(wimax_msg_data(ack_skb),
+ strerr, sizeof(strerr));
+ if (result == -EACCES)
+ d_printf(1, dev, "Cannot enter power save mode\n");
+ else if (result < 0)
+ dev_err(dev, "'Enter power save' (0x%04x) command failed: "
+ "%d - %s\n", I2400M_MT_CMD_ENTER_POWERSAVE,
+ result, strerr);
+ else
+ d_printf(1, dev, "device ready to power save\n");
+ kfree_skb(ack_skb);
+error_msg_to_dev:
+ kfree(cmd);
+error_alloc:
+ return result;
+}
+EXPORT_SYMBOL_GPL(i2400m_cmd_enter_powersave);
+
+
+/*
+ * Definitions for getting device information
+ */
+enum {
+ I2400M_TLV_DETAILED_DEVICE_INFO = 140
+};
+
+/**
+ * i2400m_get_device_info - Query the device for detailed device information
+ *
+ * @i2400m: device descriptor
+ *
+ * Returns: an skb whose skb->data points to a 'struct
+ * i2400m_tlv_detailed_device_info'. When done, kfree_skb() it. The
+ * skb is *guaranteed* to contain the whole TLV data structure.
+ *
+ * On error, IS_ERR(skb) is true and PTR_ERR(skb) is the error
+ * code.
+ */
+struct sk_buff *i2400m_get_device_info(struct i2400m *i2400m)
+{
+ int result;
+ struct device *dev = i2400m_dev(i2400m);
+ struct sk_buff *ack_skb;
+ struct i2400m_l3l4_hdr *cmd;
+ const struct i2400m_l3l4_hdr *ack;
+ size_t ack_len;
+ const struct i2400m_tlv_hdr *tlv;
+ const struct i2400m_tlv_detailed_device_info *ddi;
+ char strerr[32];
+
+ ack_skb = ERR_PTR(-ENOMEM);
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (cmd == NULL)
+ goto error_alloc;
+ cmd->type = cpu_to_le16(I2400M_MT_GET_DEVICE_INFO);
+ cmd->length = 0;
+ cmd->version = cpu_to_le16(I2400M_L3L4_VERSION);
+
+ ack_skb = i2400m_msg_to_dev(i2400m, cmd, sizeof(*cmd));
+ if (IS_ERR(ack_skb)) {
+ dev_err(dev, "Failed to issue 'get device info' command: %ld\n",
+ PTR_ERR(ack_skb));
+ goto error_msg_to_dev;
+ }
+ ack = wimax_msg_data_len(ack_skb, &ack_len);
+ result = i2400m_msg_check_status(ack, strerr, sizeof(strerr));
+ if (result < 0) {
+ dev_err(dev, "'get device info' (0x%04x) command failed: "
+ "%d - %s\n", I2400M_MT_GET_DEVICE_INFO, result,
+ strerr);
+ goto error_cmd_failed;
+ }
+ tlv = i2400m_tlv_find(i2400m, ack->pl, ack_len - sizeof(*ack),
+ I2400M_TLV_DETAILED_DEVICE_INFO, sizeof(*ddi));
+ if (tlv == NULL) {
+ dev_err(dev, "GET DEVICE INFO: "
+ "detailed device info TLV not found (0x%04x)\n",
+ I2400M_TLV_DETAILED_DEVICE_INFO);
+ result = -EIO;
+ goto error_no_tlv;
+ }
+ skb_pull(ack_skb, (void *) tlv - (void *) ack_skb->data);
+error_msg_to_dev:
+ kfree(cmd);
+error_alloc:
+ return ack_skb;
+
+error_no_tlv:
+error_cmd_failed:
+ kfree_skb(ack_skb);
+ kfree(cmd);
+ return ERR_PTR(result);
+}
+
+
+/* Firmware interface versions we support */
+enum {
+ I2400M_HDIv_MAJOR = 9,
+ I2400M_HDIv_MAJOR_2 = 8,
+ I2400M_HDIv_MINOR = 1,
+};
+
+
+/**
+ * i2400m_firmware_check - check firmware versions are compatible with
+ * the driver
+ *
+ * @i2400m: device descriptor
+ *
+ * Returns: 0 if ok, < 0 errno code on error; in that case, a message
+ * is also printed to the kernel log.
+ *
+ * Long function, but quite simple; first chunk launches the command
+ * and double checks the reply for the right TLV. Then we process the
+ * TLV (where the meat is).
+ */
+int i2400m_firmware_check(struct i2400m *i2400m)
+{
+ int result;
+ struct device *dev = i2400m_dev(i2400m);
+ struct sk_buff *ack_skb;
+ struct i2400m_l3l4_hdr *cmd;
+ const struct i2400m_l3l4_hdr *ack;
+ size_t ack_len;
+ const struct i2400m_tlv_hdr *tlv;
+ const struct i2400m_tlv_l4_message_versions *l4mv;
+ char strerr[32];
+ unsigned major, minor, branch;
+
+ result = -ENOMEM;
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (cmd == NULL)
+ goto error_alloc;
+ cmd->type = cpu_to_le16(I2400M_MT_GET_LM_VERSION);
+ cmd->length = 0;
+ cmd->version = cpu_to_le16(I2400M_L3L4_VERSION);
+
+ ack_skb = i2400m_msg_to_dev(i2400m, cmd, sizeof(*cmd));
+ if (IS_ERR(ack_skb)) {
+ result = PTR_ERR(ack_skb);
+		dev_err(dev, "Failed to issue 'get lm version' command: %d\n",
+ result);
+ goto error_msg_to_dev;
+ }
+ ack = wimax_msg_data_len(ack_skb, &ack_len);
+ result = i2400m_msg_check_status(ack, strerr, sizeof(strerr));
+ if (result < 0) {
+ dev_err(dev, "'get lm version' (0x%04x) command failed: "
+ "%d - %s\n", I2400M_MT_GET_LM_VERSION, result,
+ strerr);
+ goto error_cmd_failed;
+ }
+ tlv = i2400m_tlv_find(i2400m, ack->pl, ack_len - sizeof(*ack),
+ I2400M_TLV_L4_MESSAGE_VERSIONS, sizeof(*l4mv));
+ if (tlv == NULL) {
+ dev_err(dev, "get lm version: TLV not found (0x%04x)\n",
+ I2400M_TLV_L4_MESSAGE_VERSIONS);
+ result = -EIO;
+ goto error_no_tlv;
+ }
+ l4mv = container_of(tlv, typeof(*l4mv), hdr);
+ major = le16_to_cpu(l4mv->major);
+ minor = le16_to_cpu(l4mv->minor);
+ branch = le16_to_cpu(l4mv->branch);
+ result = -EINVAL;
+ if (major != I2400M_HDIv_MAJOR
+ && major != I2400M_HDIv_MAJOR_2) {
+ dev_err(dev, "unsupported major fw interface version "
+ "%u.%u.%u\n", major, minor, branch);
+ goto error_bad_major;
+ }
+ if (major == I2400M_HDIv_MAJOR_2)
+ dev_err(dev, "deprecated major fw interface version "
+ "%u.%u.%u\n", major, minor, branch);
+ result = 0;
+ if (minor != I2400M_HDIv_MINOR)
+		dev_warn(dev, "untested minor fw interface version %u.%u.%u\n",
+ major, minor, branch);
+error_bad_major:
+ dev_info(dev, "firmware interface version %u.%u.%u\n",
+ major, minor, branch);
+error_no_tlv:
+error_cmd_failed:
+ kfree_skb(ack_skb);
+error_msg_to_dev:
+ kfree(cmd);
+error_alloc:
+ return result;
+}
+
+
+/*
+ * Send a DoExitIdle command to the device to ask it to go out of
+ * basestation-idle mode.
+ *
+ * @i2400m: device descriptor
+ *
+ * This starts a renegotiation with the basestation that might involve
+ * another crypto handshake with user space.
+ *
+ * Returns: 0 if ok, < 0 errno code on error.
+ */
+int i2400m_cmd_exit_idle(struct i2400m *i2400m)
+{
+ int result;
+ struct device *dev = i2400m_dev(i2400m);
+ struct sk_buff *ack_skb;
+ struct i2400m_l3l4_hdr *cmd;
+ char strerr[32];
+
+ result = -ENOMEM;
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (cmd == NULL)
+ goto error_alloc;
+ cmd->type = cpu_to_le16(I2400M_MT_CMD_EXIT_IDLE);
+ cmd->length = 0;
+ cmd->version = cpu_to_le16(I2400M_L3L4_VERSION);
+
+ ack_skb = i2400m_msg_to_dev(i2400m, cmd, sizeof(*cmd));
+ result = PTR_ERR(ack_skb);
+ if (IS_ERR(ack_skb)) {
+ dev_err(dev, "Failed to issue 'exit idle' command: %d\n",
+ result);
+ goto error_msg_to_dev;
+ }
+ result = i2400m_msg_check_status(wimax_msg_data(ack_skb),
+ strerr, sizeof(strerr));
+ kfree_skb(ack_skb);
+error_msg_to_dev:
+ kfree(cmd);
+error_alloc:
+	return result;
+}
+
+
+/*
+ * Query the device for its state, update the WiMAX stack's idea of it
+ *
+ * @i2400m: device descriptor
+ *
+ * Returns: 0 if ok, < 0 errno code on error.
+ *
+ * Executes a 'Get State' command and parses the returned
+ * TLVs.
+ *
+ * Because this is almost identical to a 'Report State', we use
+ * i2400m_report_state_hook() to parse the answer. This will set the
+ * carrier state, as well as the RF Kill switches state.
+ */
+int i2400m_cmd_get_state(struct i2400m *i2400m)
+{
+ int result;
+ struct device *dev = i2400m_dev(i2400m);
+ struct sk_buff *ack_skb;
+ struct i2400m_l3l4_hdr *cmd;
+ const struct i2400m_l3l4_hdr *ack;
+ size_t ack_len;
+ char strerr[32];
+
+ result = -ENOMEM;
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (cmd == NULL)
+ goto error_alloc;
+ cmd->type = cpu_to_le16(I2400M_MT_GET_STATE);
+ cmd->length = 0;
+ cmd->version = cpu_to_le16(I2400M_L3L4_VERSION);
+
+ ack_skb = i2400m_msg_to_dev(i2400m, cmd, sizeof(*cmd));
+ if (IS_ERR(ack_skb)) {
+ dev_err(dev, "Failed to issue 'get state' command: %ld\n",
+ PTR_ERR(ack_skb));
+ result = PTR_ERR(ack_skb);
+ goto error_msg_to_dev;
+ }
+ ack = wimax_msg_data_len(ack_skb, &ack_len);
+ result = i2400m_msg_check_status(ack, strerr, sizeof(strerr));
+ if (result < 0) {
+ dev_err(dev, "'get state' (0x%04x) command failed: "
+ "%d - %s\n", I2400M_MT_GET_STATE, result, strerr);
+ goto error_cmd_failed;
+ }
+ i2400m_report_state_hook(i2400m, ack, ack_len - sizeof(*ack),
+ "GET STATE");
+ result = 0;
+ kfree_skb(ack_skb);
+error_cmd_failed:
+error_msg_to_dev:
+ kfree(cmd);
+error_alloc:
+ return result;
+}
+EXPORT_SYMBOL_GPL(i2400m_cmd_get_state);
+
+
+/**
+ * i2400m_set_init_config - set basic configuration settings
+ *
+ * @i2400m: device descriptor
+ * @arg: array of pointers to the TLV headers to send for
+ *     configuration (each followed by its payload).
+ *     TLV headers and payloads must be properly initialized, with the
+ *     right endianness (LE).
+ * @args: number of pointers in the @arg array
+ */
+int i2400m_set_init_config(struct i2400m *i2400m,
+ const struct i2400m_tlv_hdr **arg, size_t args)
+{
+ int result;
+ struct device *dev = i2400m_dev(i2400m);
+ struct sk_buff *ack_skb;
+ struct i2400m_l3l4_hdr *cmd;
+ char strerr[32];
+ unsigned argc, argsize, tlv_size;
+ const struct i2400m_tlv_hdr *tlv_hdr;
+ void *buf, *itr;
+
+ d_fnstart(3, dev, "(i2400m %p arg %p args %zu)\n", i2400m, arg, args);
+ result = 0;
+ if (args == 0)
+ goto none;
+ /* Compute the size of all the TLVs, so we can alloc a
+ * contiguous command block to copy them. */
+ argsize = 0;
+ for (argc = 0; argc < args; argc++) {
+ tlv_hdr = arg[argc];
+ argsize += sizeof(*tlv_hdr) + le16_to_cpu(tlv_hdr->length);
+ }
+ WARN_ON(argc >= 9); /* As per hw spec */
+
+	/* Alloc the space for the command and TLVs */
+ result = -ENOMEM;
+ buf = kzalloc(sizeof(*cmd) + argsize, GFP_KERNEL);
+ if (buf == NULL)
+ goto error_alloc;
+ cmd = buf;
+ cmd->type = cpu_to_le16(I2400M_MT_SET_INIT_CONFIG);
+ cmd->length = cpu_to_le16(argsize);
+ cmd->version = cpu_to_le16(I2400M_L3L4_VERSION);
+
+ /* Copy the TLVs */
+ itr = buf + sizeof(*cmd);
+ for (argc = 0; argc < args; argc++) {
+ tlv_hdr = arg[argc];
+ tlv_size = sizeof(*tlv_hdr) + le16_to_cpu(tlv_hdr->length);
+ memcpy(itr, tlv_hdr, tlv_size);
+ itr += tlv_size;
+ }
+
+ /* Send the message! */
+ ack_skb = i2400m_msg_to_dev(i2400m, buf, sizeof(*cmd) + argsize);
+ result = PTR_ERR(ack_skb);
+ if (IS_ERR(ack_skb)) {
+ dev_err(dev, "Failed to issue 'init config' command: %d\n",
+ result);
+
+ goto error_msg_to_dev;
+ }
+ result = i2400m_msg_check_status(wimax_msg_data(ack_skb),
+ strerr, sizeof(strerr));
+ if (result < 0)
+ dev_err(dev, "'init config' (0x%04x) command failed: %d - %s\n",
+ I2400M_MT_SET_INIT_CONFIG, result, strerr);
+ kfree_skb(ack_skb);
+error_msg_to_dev:
+ kfree(buf);
+error_alloc:
+none:
+ d_fnend(3, dev, "(i2400m %p arg %p args %zu) = %d\n",
+ i2400m, arg, args, result);
+	return result;
+}
+EXPORT_SYMBOL_GPL(i2400m_set_init_config);
+
+
+/**
+ * i2400m_dev_initialize - Initialize the device once communications are ready
+ *
+ * @i2400m: device descriptor
+ *
+ * Returns: 0 if ok, < 0 errno code on error.
+ *
+ * Configures the device to work the way we like it.
+ *
+ * At the point of this call, the device is registered with the WiMAX
+ * and netdev stacks, firmware is uploaded and we can talk to the
+ * device normally.
+ */
+int i2400m_dev_initialize(struct i2400m *i2400m)
+{
+ int result;
+ struct device *dev = i2400m_dev(i2400m);
+ struct i2400m_tlv_config_idle_parameters idle_params;
+ const struct i2400m_tlv_hdr *args[9];
+ unsigned argc = 0;
+
+ d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
+ /* Useless for now...might change */
+ if (i2400m_idle_mode_disabled) {
+ idle_params.hdr.type =
+ cpu_to_le16(I2400M_TLV_CONFIG_IDLE_PARAMETERS);
+ idle_params.hdr.length = cpu_to_le16(
+ sizeof(idle_params) - sizeof(idle_params.hdr));
+ idle_params.idle_timeout = 0;
+ idle_params.idle_paging_interval = 0;
+ args[argc++] = &idle_params.hdr;
+ }
+ result = i2400m_set_init_config(i2400m, args, argc);
+ if (result < 0)
+ goto error;
+ result = i2400m_firmware_check(i2400m); /* fw versions ok? */
+ if (result < 0)
+ goto error;
+ /*
+ * Update state: Here it just calls a get state; parsing the
+ * result (System State TLV and RF Status TLV [done in the rx
+ * path hooks]) will set the hardware and software RF-Kill
+ * status.
+ */
+ result = i2400m_cmd_get_state(i2400m);
+error:
+ d_fnend(3, dev, "(i2400m %p) = %d\n", i2400m, result);
+ return result;
+}
+
+
+/**
+ * i2400m_dev_shutdown - Shutdown a running device
+ *
+ * @i2400m: device descriptor
+ *
+ * Gracefully stops the device, moving it to the lowest power
+ * consumption state possible.
+ */
+void i2400m_dev_shutdown(struct i2400m *i2400m)
+{
+ int result = -ENODEV;
+ struct device *dev = i2400m_dev(i2400m);
+
+ d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
+ result = i2400m->bus_reset(i2400m, I2400M_RT_WARM);
+ d_fnend(3, dev, "(i2400m %p) = void [%d]\n", i2400m, result);
+ return;
+}
diff --git a/drivers/net/wimax/i2400m/debug-levels.h b/drivers/net/wimax/i2400m/debug-levels.h
new file mode 100644
index 00000000000..3183baa16a5
--- /dev/null
+++ b/drivers/net/wimax/i2400m/debug-levels.h
@@ -0,0 +1,45 @@
+/*
+ * Intel Wireless WiMAX Connection 2400m
+ * Debug levels control file for the i2400m module
+ *
+ *
+ * Copyright (C) 2007-2008 Intel Corporation
+ * Inaky Perez-Gonzalez
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+#ifndef __debug_levels__h__
+#define __debug_levels__h__
+
+/* Maximum compile and run time debug level for all submodules */
+#define D_MODULENAME i2400m
+#define D_MASTER CONFIG_WIMAX_I2400M_DEBUG_LEVEL
+
+#include <linux/wimax/debug.h>
+
+/* List of all the enabled modules */
+enum d_module {
+ D_SUBMODULE_DECLARE(control),
+ D_SUBMODULE_DECLARE(driver),
+ D_SUBMODULE_DECLARE(debugfs),
+ D_SUBMODULE_DECLARE(fw),
+ D_SUBMODULE_DECLARE(netdev),
+ D_SUBMODULE_DECLARE(rfkill),
+ D_SUBMODULE_DECLARE(rx),
+ D_SUBMODULE_DECLARE(tx),
+};
+
+
+#endif /* #ifndef __debug_levels__h__ */
diff --git a/drivers/net/wimax/i2400m/debugfs.c b/drivers/net/wimax/i2400m/debugfs.c
new file mode 100644
index 00000000000..62663298597
--- /dev/null
+++ b/drivers/net/wimax/i2400m/debugfs.c
@@ -0,0 +1,392 @@
+/*
+ * Intel Wireless WiMAX Connection 2400m
+ * Debugfs interfaces to manipulate driver and device information
+ *
+ *
+ * Copyright (C) 2007 Intel Corporation
+ * Inaky Perez-Gonzalez
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/netdevice.h>
+#include <linux/spinlock.h>
+#include <linux/fs.h>
+#include "i2400m.h"
+
+
+#define D_SUBMODULE debugfs
+#include "debug-levels.h"
+
+static
+int debugfs_netdev_queue_stopped_get(void *data, u64 *val)
+{
+ struct i2400m *i2400m = data;
+ *val = netif_queue_stopped(i2400m->wimax_dev.net_dev);
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(fops_netdev_queue_stopped,
+ debugfs_netdev_queue_stopped_get,
+ NULL, "%llu\n");
+
+
+static
+struct dentry *debugfs_create_netdev_queue_stopped(
+ const char *name, struct dentry *parent, struct i2400m *i2400m)
+{
+ return debugfs_create_file(name, 0400, parent, i2400m,
+ &fops_netdev_queue_stopped);
+}
+
+
+/*
+ * inode->i_private has the @data argument to debugfs_create_file()
+ */
+static
+int i2400m_stats_open(struct inode *inode, struct file *filp)
+{
+ filp->private_data = inode->i_private;
+ return 0;
+}
+
+/*
+ * We don't allow partial reads of this file, as then the reader could
+ * get inconsistent data while it is being updated.
+ *
+ * So you either read it all or nothing; if you try to read with an
+ * offset != 0, we consider you are done reading.
+ */
+static
+ssize_t i2400m_rx_stats_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct i2400m *i2400m = filp->private_data;
+ char buf[128];
+ unsigned long flags;
+
+ if (*ppos != 0)
+ return 0;
+ if (count < sizeof(buf))
+ return -ENOSPC;
+ spin_lock_irqsave(&i2400m->rx_lock, flags);
+ snprintf(buf, sizeof(buf), "%u %u %u %u %u %u %u\n",
+ i2400m->rx_pl_num, i2400m->rx_pl_min,
+ i2400m->rx_pl_max, i2400m->rx_num,
+ i2400m->rx_size_acc,
+ i2400m->rx_size_min, i2400m->rx_size_max);
+ spin_unlock_irqrestore(&i2400m->rx_lock, flags);
+ return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
+}
+
+
+/* Any write clears the stats */
+static
+ssize_t i2400m_rx_stats_write(struct file *filp, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct i2400m *i2400m = filp->private_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&i2400m->rx_lock, flags);
+ i2400m->rx_pl_num = 0;
+ i2400m->rx_pl_max = 0;
+ i2400m->rx_pl_min = UINT_MAX;
+ i2400m->rx_num = 0;
+ i2400m->rx_size_acc = 0;
+ i2400m->rx_size_min = UINT_MAX;
+ i2400m->rx_size_max = 0;
+ spin_unlock_irqrestore(&i2400m->rx_lock, flags);
+ return count;
+}
+
+static
+const struct file_operations i2400m_rx_stats_fops = {
+ .owner = THIS_MODULE,
+ .open = i2400m_stats_open,
+ .read = i2400m_rx_stats_read,
+ .write = i2400m_rx_stats_write,
+};
+
+
+/* See i2400m_rx_stats_read() */
+static
+ssize_t i2400m_tx_stats_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct i2400m *i2400m = filp->private_data;
+ char buf[128];
+ unsigned long flags;
+
+ if (*ppos != 0)
+ return 0;
+ if (count < sizeof(buf))
+ return -ENOSPC;
+ spin_lock_irqsave(&i2400m->tx_lock, flags);
+ snprintf(buf, sizeof(buf), "%u %u %u %u %u %u %u\n",
+ i2400m->tx_pl_num, i2400m->tx_pl_min,
+ i2400m->tx_pl_max, i2400m->tx_num,
+ i2400m->tx_size_acc,
+ i2400m->tx_size_min, i2400m->tx_size_max);
+ spin_unlock_irqrestore(&i2400m->tx_lock, flags);
+ return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
+}
+
+/* Any write clears the stats */
+static
+ssize_t i2400m_tx_stats_write(struct file *filp, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct i2400m *i2400m = filp->private_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&i2400m->tx_lock, flags);
+ i2400m->tx_pl_num = 0;
+ i2400m->tx_pl_max = 0;
+ i2400m->tx_pl_min = UINT_MAX;
+ i2400m->tx_num = 0;
+ i2400m->tx_size_acc = 0;
+ i2400m->tx_size_min = UINT_MAX;
+ i2400m->tx_size_max = 0;
+ spin_unlock_irqrestore(&i2400m->tx_lock, flags);
+ return count;
+}
+
+static
+const struct file_operations i2400m_tx_stats_fops = {
+ .owner = THIS_MODULE,
+ .open = i2400m_stats_open,
+ .read = i2400m_tx_stats_read,
+ .write = i2400m_tx_stats_write,
+};
+
+
+/* Write 1 to ask the device to go into suspend */
+static
+int debugfs_i2400m_suspend_set(void *data, u64 val)
+{
+ int result;
+ struct i2400m *i2400m = data;
+ result = i2400m_cmd_enter_powersave(i2400m);
+ if (result >= 0)
+ result = 0;
+ return result;
+}
+DEFINE_SIMPLE_ATTRIBUTE(fops_i2400m_suspend,
+ NULL, debugfs_i2400m_suspend_set,
+ "%llu\n");
+
+static
+struct dentry *debugfs_create_i2400m_suspend(
+ const char *name, struct dentry *parent, struct i2400m *i2400m)
+{
+ return debugfs_create_file(name, 0200, parent, i2400m,
+ &fops_i2400m_suspend);
+}
+
+
+/*
+ * Reset the device
+ *
+ * Write 0 to ask the device to soft reset, 1 to cold reset, 2 to bus
+ * reset (as defined by enum i2400m_reset_type).
+ */
+static
+int debugfs_i2400m_reset_set(void *data, u64 val)
+{
+ int result;
+ struct i2400m *i2400m = data;
+ enum i2400m_reset_type rt = val;
+	switch (rt) {
+	case I2400M_RT_WARM:
+	case I2400M_RT_COLD:
+	case I2400M_RT_BUS:
+		result = i2400m->bus_reset(i2400m, rt);
+		if (result >= 0)
+			result = 0;
+		break;
+	default:
+		result = -EINVAL;
+	}
+ return result;
+}
+DEFINE_SIMPLE_ATTRIBUTE(fops_i2400m_reset,
+ NULL, debugfs_i2400m_reset_set,
+ "%llu\n");
+
+static
+struct dentry *debugfs_create_i2400m_reset(
+ const char *name, struct dentry *parent, struct i2400m *i2400m)
+{
+ return debugfs_create_file(name, 0200, parent, i2400m,
+ &fops_i2400m_reset);
+}
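+
+/*
+ * Illustrative usage from user space (sketch, not from the original
+ * code; the exact path depends on where debugfs is mounted and on the
+ * name the WiMAX stack gives this device's debugfs directory):
+ *
+ *   # echo 0 > /sys/kernel/debug/wimax:wmx0/i2400m/reset     (warm reset)
+ *   # echo 2 > /sys/kernel/debug/wimax:wmx0/i2400m/reset     (bus reset)
+ */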
+
+/*
+ * Debug levels control; see debug.h
+ */
+struct d_level D_LEVEL[] = {
+ D_SUBMODULE_DEFINE(control),
+ D_SUBMODULE_DEFINE(driver),
+ D_SUBMODULE_DEFINE(debugfs),
+ D_SUBMODULE_DEFINE(fw),
+ D_SUBMODULE_DEFINE(netdev),
+ D_SUBMODULE_DEFINE(rfkill),
+ D_SUBMODULE_DEFINE(rx),
+ D_SUBMODULE_DEFINE(tx),
+};
+size_t D_LEVEL_SIZE = ARRAY_SIZE(D_LEVEL);
+
+#define __debugfs_register(prefix, name, parent) \
+do { \
+ result = d_level_register_debugfs(prefix, name, parent); \
+ if (result < 0) \
+ goto error; \
+} while (0)
+
+
+int i2400m_debugfs_add(struct i2400m *i2400m)
+{
+ int result;
+ struct device *dev = i2400m_dev(i2400m);
+ struct dentry *dentry = i2400m->wimax_dev.debugfs_dentry;
+ struct dentry *fd;
+
+ dentry = debugfs_create_dir("i2400m", dentry);
+ result = PTR_ERR(dentry);
+ if (IS_ERR(dentry)) {
+ if (result == -ENODEV)
+ result = 0; /* No debugfs support */
+ goto error;
+ }
+ i2400m->debugfs_dentry = dentry;
+ __debugfs_register("dl_", control, dentry);
+ __debugfs_register("dl_", driver, dentry);
+ __debugfs_register("dl_", debugfs, dentry);
+ __debugfs_register("dl_", fw, dentry);
+ __debugfs_register("dl_", netdev, dentry);
+ __debugfs_register("dl_", rfkill, dentry);
+ __debugfs_register("dl_", rx, dentry);
+ __debugfs_register("dl_", tx, dentry);
+
+ fd = debugfs_create_size_t("tx_in", 0400, dentry,
+ &i2400m->tx_in);
+ result = PTR_ERR(fd);
+ if (IS_ERR(fd) && result != -ENODEV) {
+ dev_err(dev, "Can't create debugfs entry "
+ "tx_in: %d\n", result);
+ goto error;
+ }
+
+ fd = debugfs_create_size_t("tx_out", 0400, dentry,
+ &i2400m->tx_out);
+ result = PTR_ERR(fd);
+ if (IS_ERR(fd) && result != -ENODEV) {
+ dev_err(dev, "Can't create debugfs entry "
+ "tx_out: %d\n", result);
+ goto error;
+ }
+
+ fd = debugfs_create_u32("state", 0600, dentry,
+ &i2400m->state);
+ result = PTR_ERR(fd);
+ if (IS_ERR(fd) && result != -ENODEV) {
+ dev_err(dev, "Can't create debugfs entry "
+ "state: %d\n", result);
+ goto error;
+ }
+
+ /*
+ * Trace received messages from user space
+ *
+	 * In order to tap the bidirectional message stream in the
+	 * 'msg' pipe, user space can read from the 'msg' pipe what the
+	 * kernel sends; however, due to limitations in libnl, it can't
+	 * see what other applications are sending down to the kernel.
+ *
+ * So we have this hack where the driver will echo any message
+ * received on the msg pipe from user space [through a call to
+ * wimax_dev->op_msg_from_user() into
+ * i2400m_op_msg_from_user()] into the 'trace' pipe that this
+ * driver creates.
+ *
+ * So then, reading from both the 'trace' and 'msg' pipes in
+ * user space will provide a full dump of the traffic.
+ *
+ * Write 1 to activate, 0 to clear.
+ *
+ * It is not really very atomic, but it is also not too
+ * critical.
+ */
+ fd = debugfs_create_u8("trace_msg_from_user", 0600, dentry,
+ &i2400m->trace_msg_from_user);
+ result = PTR_ERR(fd);
+ if (IS_ERR(fd) && result != -ENODEV) {
+ dev_err(dev, "Can't create debugfs entry "
+ "trace_msg_from_user: %d\n", result);
+ goto error;
+ }
+
+ fd = debugfs_create_netdev_queue_stopped("netdev_queue_stopped",
+ dentry, i2400m);
+ result = PTR_ERR(fd);
+ if (IS_ERR(fd) && result != -ENODEV) {
+ dev_err(dev, "Can't create debugfs entry "
+ "netdev_queue_stopped: %d\n", result);
+ goto error;
+ }
+
+ fd = debugfs_create_file("rx_stats", 0600, dentry, i2400m,
+ &i2400m_rx_stats_fops);
+ result = PTR_ERR(fd);
+ if (IS_ERR(fd) && result != -ENODEV) {
+ dev_err(dev, "Can't create debugfs entry "
+ "rx_stats: %d\n", result);
+ goto error;
+ }
+
+ fd = debugfs_create_file("tx_stats", 0600, dentry, i2400m,
+ &i2400m_tx_stats_fops);
+ result = PTR_ERR(fd);
+ if (IS_ERR(fd) && result != -ENODEV) {
+ dev_err(dev, "Can't create debugfs entry "
+ "tx_stats: %d\n", result);
+ goto error;
+ }
+
+ fd = debugfs_create_i2400m_suspend("suspend", dentry, i2400m);
+ result = PTR_ERR(fd);
+ if (IS_ERR(fd) && result != -ENODEV) {
+ dev_err(dev, "Can't create debugfs entry suspend: %d\n",
+ result);
+ goto error;
+ }
+
+ fd = debugfs_create_i2400m_reset("reset", dentry, i2400m);
+ result = PTR_ERR(fd);
+ if (IS_ERR(fd) && result != -ENODEV) {
+ dev_err(dev, "Can't create debugfs entry reset: %d\n", result);
+ goto error;
+ }
+
+ result = 0;
+error:
+ return result;
+}
+
+void i2400m_debugfs_rm(struct i2400m *i2400m)
+{
+ debugfs_remove_recursive(i2400m->debugfs_dentry);
+}
diff --git a/drivers/net/wimax/i2400m/driver.c b/drivers/net/wimax/i2400m/driver.c
new file mode 100644
index 00000000000..5f98047e18c
--- /dev/null
+++ b/drivers/net/wimax/i2400m/driver.c
@@ -0,0 +1,728 @@
+/*
+ * Intel Wireless WiMAX Connection 2400m
+ * Generic probe/disconnect, reset and message passing
+ *
+ *
+ * Copyright (C) 2007-2008 Intel Corporation
+ * Inaky Perez-Gonzalez
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * See i2400m.h for driver documentation. This contains helpers for
+ * the driver model glue [_setup()/_release()], handling device resets
+ * [_dev_reset_handle()], and the backends for the WiMAX stack ops
+ * reset [_op_reset()] and message from user [_op_msg_from_user()].
+ *
+ * ROADMAP:
+ *
+ * i2400m_op_msg_from_user()
+ * i2400m_msg_to_dev()
+ * wimax_msg_to_user_send()
+ *
+ * i2400m_op_reset()
+ *   i2400m->bus_reset()
+ *
+ * i2400m_dev_reset_handle()
+ * __i2400m_dev_reset_handle()
+ * __i2400m_dev_stop()
+ * __i2400m_dev_start()
+ *
+ * i2400m_setup()
+ * i2400m_bootrom_init()
+ * register_netdev()
+ * i2400m_dev_start()
+ * __i2400m_dev_start()
+ * i2400m_dev_bootstrap()
+ * i2400m_tx_setup()
+ * i2400m->bus_dev_start()
+ * i2400m_check_mac_addr()
+ * wimax_dev_add()
+ *
+ * i2400m_release()
+ * wimax_dev_rm()
+ * i2400m_dev_stop()
+ * __i2400m_dev_stop()
+ * i2400m_dev_shutdown()
+ * i2400m->bus_dev_stop()
+ * i2400m_tx_release()
+ * unregister_netdev()
+ */
+#include "i2400m.h"
+#include <linux/wimax/i2400m.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+
+#define D_SUBMODULE driver
+#include "debug-levels.h"
+
+
+int i2400m_idle_mode_disabled; /* 0 (idle mode enabled) by default */
+module_param_named(idle_mode_disabled, i2400m_idle_mode_disabled, int, 0644);
+MODULE_PARM_DESC(idle_mode_disabled,
+ "If true, the device will not enable idle mode negotiation "
+ "with the base station (when connected) to save power.");
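+
+/*
+ * Illustrative usage (not from the original code): the parameter can
+ * be given at module load time, e.g.
+ *
+ *   modprobe i2400m idle_mode_disabled=1
+ *
+ * and, being registered with mode 0644, it also shows up under
+ * /sys/module/i2400m/parameters/idle_mode_disabled.
+ */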
+
+/**
+ * i2400m_queue_work - schedule work on a i2400m's queue
+ *
+ * @i2400m: device descriptor
+ *
+ * @fn: function to run to execute work. It gets passed a 'struct
+ * work_struct' that is wrapped in a 'struct i2400m_work'. Once
+ * done, you have to (1) i2400m_put(i2400m_work->i2400m) and then
+ * (2) kfree(i2400m_work).
+ *
+ * @gfp_flags: GFP flags for memory allocation.
+ *
+ * @pl: pointer to a payload buffer that you want to pass to the _work
+ * function. Use this to pack (for example) a struct with extra
+ * arguments.
+ *
+ * @pl_size: size of the payload buffer.
+ *
+ * We do this quite often, so this just saves typing; allocate a
+ * wrapper for an i2400m, get a ref to it, pack arguments and launch
+ * the work.
+ *
+ * A usual workflow is:
+ *
+ * struct my_work_args {
+ * void *something;
+ * int whatever;
+ * };
+ * ...
+ *
+ * struct my_work_args my_args = {
+ * .something = FOO,
+ *    .whatever = BLAH
+ * };
+ * i2400m_queue_work(i2400m, my_work_fn, GFP_KERNEL,
+ *    &my_args, sizeof(my_args));
+ *
+ * And now the work function can unpack the arguments and call the
+ * real function (or do the job itself):
+ *
+ * static
+ * void my_work_fn(struct work_struct *ws)
+ * {
+ * struct i2400m_work *iw =
+ * container_of(ws, struct i2400m_work, ws);
+ * struct my_work_args *my_args = (void *) iw->pl;
+ *
+ *    my_work(iw->i2400m, my_args->something, my_args->whatever);
+ * }
+ */
+int i2400m_queue_work(struct i2400m *i2400m,
+ void (*fn)(struct work_struct *), gfp_t gfp_flags,
+ const void *pl, size_t pl_size)
+{
+ int result;
+ struct i2400m_work *iw;
+
+ BUG_ON(i2400m->work_queue == NULL);
+ result = -ENOMEM;
+ iw = kzalloc(sizeof(*iw) + pl_size, gfp_flags);
+ if (iw == NULL)
+ goto error_kzalloc;
+ iw->i2400m = i2400m_get(i2400m);
+ memcpy(iw->pl, pl, pl_size);
+ INIT_WORK(&iw->ws, fn);
+ result = queue_work(i2400m->work_queue, &iw->ws);
+error_kzalloc:
+ return result;
+}
+EXPORT_SYMBOL_GPL(i2400m_queue_work);
+
+
+/*
+ * Schedule i2400m's specific work on the system's queue.
+ *
+ * Used for a few cases where we really need it; otherwise, identical
+ * to i2400m_queue_work().
+ *
+ * Returns < 0 errno code on error, 1 if ok.
+ *
+ * If it returns zero, something really bad happened, as it means the
+ * work struct was already queued, but we have just allocated it, so
+ * it should not happen.
+ */
+int i2400m_schedule_work(struct i2400m *i2400m,
+ void (*fn)(struct work_struct *), gfp_t gfp_flags)
+{
+ int result;
+ struct i2400m_work *iw;
+
+ BUG_ON(i2400m->work_queue == NULL);
+ result = -ENOMEM;
+ iw = kzalloc(sizeof(*iw), gfp_flags);
+ if (iw == NULL)
+ goto error_kzalloc;
+ iw->i2400m = i2400m_get(i2400m);
+ INIT_WORK(&iw->ws, fn);
+ result = schedule_work(&iw->ws);
+ if (result == 0)
+ result = -ENXIO;
+error_kzalloc:
+ return result;
+}
+
+
+/*
+ * WiMAX stack operation: relay a message from user space
+ *
+ * @wimax_dev: device descriptor
+ * @pipe_name: named pipe the message is for
+ * @msg_buf: pointer to the message bytes
+ * @msg_len: length of the buffer
+ * @genl_info: passed by the generic netlink layer
+ *
+ * The WiMAX stack will call this function when a message was received
+ * from user space.
+ *
+ * For the i2400m, this is an L3L4 message, as specified in
+ * include/linux/wimax/i2400m.h, and thus prefixed with a 'struct
+ * i2400m_l3l4_hdr'. Driver (and device) expect the messages to be
+ * coded in Little Endian.
+ *
+ * This function just verifies that the header declaration and the
+ * payload are consistent and then deals with it, either forwarding it
+ * to the device or processing it locally.
+ *
+ * In the i2400m, messages are basically commands that will carry an
+ * ack, so we use i2400m_msg_to_dev() and then deliver the ack back to
+ * user space. The rx.c code might intercept the response and use it
+ * to update the driver's state, but then it will pass it on so it can
+ * be relayed back to user space.
+ *
+ * Note that asynchronous events from the device are processed and
+ * sent to user space in rx.c.
+ */
+static
+int i2400m_op_msg_from_user(struct wimax_dev *wimax_dev,
+ const char *pipe_name,
+ const void *msg_buf, size_t msg_len,
+ const struct genl_info *genl_info)
+{
+ int result;
+ struct i2400m *i2400m = wimax_dev_to_i2400m(wimax_dev);
+ struct device *dev = i2400m_dev(i2400m);
+ struct sk_buff *ack_skb;
+
+ d_fnstart(4, dev, "(wimax_dev %p [i2400m %p] msg_buf %p "
+ "msg_len %zu genl_info %p)\n", wimax_dev, i2400m,
+ msg_buf, msg_len, genl_info);
+ ack_skb = i2400m_msg_to_dev(i2400m, msg_buf, msg_len);
+ result = PTR_ERR(ack_skb);
+ if (IS_ERR(ack_skb))
+ goto error_msg_to_dev;
+ if (unlikely(i2400m->trace_msg_from_user))
+ wimax_msg(&i2400m->wimax_dev, "trace",
+ msg_buf, msg_len, GFP_KERNEL);
+ result = wimax_msg_send(&i2400m->wimax_dev, ack_skb);
+error_msg_to_dev:
+ d_fnend(4, dev, "(wimax_dev %p [i2400m %p] msg_buf %p msg_len %zu "
+ "genl_info %p) = %d\n", wimax_dev, i2400m, msg_buf, msg_len,
+ genl_info, result);
+ return result;
+}
+
+
+/*
+ * Context to wait for a reset to finalize
+ */
+struct i2400m_reset_ctx {
+ struct completion completion;
+ int result;
+};
+
+
+/*
+ * WiMAX stack operation: reset a device
+ *
+ * @wimax_dev: device descriptor
+ *
+ * See the documentation for wimax_reset() and wimax_dev->op_reset for
+ * the requirements of this function. The WiMAX stack guarantees
+ * serialization on calls to this function.
+ *
+ * Do a warm reset on the device; if it fails, resort to a cold reset
+ * and return -ENODEV. On successful warm reset, we need to block
+ * until it is complete.
+ *
+ * The bus-driver implementation of reset takes care of falling back
+ * to cold reset if warm fails.
+ */
+static
+int i2400m_op_reset(struct wimax_dev *wimax_dev)
+{
+ int result;
+ struct i2400m *i2400m = wimax_dev_to_i2400m(wimax_dev);
+ struct device *dev = i2400m_dev(i2400m);
+ struct i2400m_reset_ctx ctx = {
+ .completion = COMPLETION_INITIALIZER_ONSTACK(ctx.completion),
+ .result = 0,
+ };
+
+ d_fnstart(4, dev, "(wimax_dev %p)\n", wimax_dev);
+ mutex_lock(&i2400m->init_mutex);
+ i2400m->reset_ctx = &ctx;
+ mutex_unlock(&i2400m->init_mutex);
+ result = i2400m->bus_reset(i2400m, I2400M_RT_WARM);
+ if (result < 0)
+ goto out;
+ result = wait_for_completion_timeout(&ctx.completion, 4*HZ);
+ if (result == 0)
+ result = -ETIMEDOUT;
+ else if (result > 0)
+ result = ctx.result;
+ /* if result < 0, pass it on */
+ mutex_lock(&i2400m->init_mutex);
+ i2400m->reset_ctx = NULL;
+ mutex_unlock(&i2400m->init_mutex);
+out:
+ d_fnend(4, dev, "(wimax_dev %p) = %d\n", wimax_dev, result);
+ return result;
+}
+
+
+/*
+ * Check that the MAC address we got from boot mode is ok
+ *
+ * @i2400m: device descriptor
+ *
+ * Returns: 0 if ok, < 0 errno code on error.
+ */
+static
+int i2400m_check_mac_addr(struct i2400m *i2400m)
+{
+ int result;
+ struct device *dev = i2400m_dev(i2400m);
+ struct sk_buff *skb;
+ const struct i2400m_tlv_detailed_device_info *ddi;
+ struct net_device *net_dev = i2400m->wimax_dev.net_dev;
+ const unsigned char zeromac[ETH_ALEN] = { 0 };
+
+ d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
+ skb = i2400m_get_device_info(i2400m);
+ if (IS_ERR(skb)) {
+ result = PTR_ERR(skb);
+ dev_err(dev, "Cannot verify MAC address, error reading: %d\n",
+ result);
+ goto error;
+ }
+	/* Extract MAC address */
+ ddi = (void *) skb->data;
+ BUILD_BUG_ON(ETH_ALEN != sizeof(ddi->mac_address));
+ d_printf(2, dev, "GET DEVICE INFO: mac addr "
+ "%02x:%02x:%02x:%02x:%02x:%02x\n",
+ ddi->mac_address[0], ddi->mac_address[1],
+ ddi->mac_address[2], ddi->mac_address[3],
+ ddi->mac_address[4], ddi->mac_address[5]);
+ if (!memcmp(net_dev->perm_addr, ddi->mac_address,
+ sizeof(ddi->mac_address)))
+ goto ok;
+	dev_warn(dev, "warning: device reports a MAC address different "
+		 "from the one reported in boot mode\n");
+ dev_warn(dev, "device reports %02x:%02x:%02x:%02x:%02x:%02x\n",
+ ddi->mac_address[0], ddi->mac_address[1],
+ ddi->mac_address[2], ddi->mac_address[3],
+ ddi->mac_address[4], ddi->mac_address[5]);
+ dev_warn(dev, "boot mode reported %02x:%02x:%02x:%02x:%02x:%02x\n",
+ net_dev->perm_addr[0], net_dev->perm_addr[1],
+ net_dev->perm_addr[2], net_dev->perm_addr[3],
+ net_dev->perm_addr[4], net_dev->perm_addr[5]);
+ if (!memcmp(zeromac, ddi->mac_address, sizeof(zeromac)))
+ dev_err(dev, "device reports an invalid MAC address, "
+ "not updating\n");
+ else {
+ dev_warn(dev, "updating MAC address\n");
+ net_dev->addr_len = ETH_ALEN;
+ memcpy(net_dev->perm_addr, ddi->mac_address, ETH_ALEN);
+ memcpy(net_dev->dev_addr, ddi->mac_address, ETH_ALEN);
+ }
+ok:
+ result = 0;
+ kfree_skb(skb);
+error:
+ d_fnend(3, dev, "(i2400m %p) = %d\n", i2400m, result);
+ return result;
+}
+
+
+/**
+ * __i2400m_dev_start - Bring up driver communication with the device
+ *
+ * @i2400m: device descriptor
+ * @flags: boot mode flags
+ *
+ * Returns: 0 if ok, < 0 errno code on error.
+ *
+ * Uploads firmware and brings up all the resources needed to be able
+ * to communicate with the device.
+ *
+ * TX needs to be setup before the bus-specific code (otherwise on
+ * shutdown, the bus-tx code could try to access it).
+ */
+static
+int __i2400m_dev_start(struct i2400m *i2400m, enum i2400m_bri flags)
+{
+ int result;
+ struct wimax_dev *wimax_dev = &i2400m->wimax_dev;
+ struct net_device *net_dev = wimax_dev->net_dev;
+ struct device *dev = i2400m_dev(i2400m);
+ int times = 3;
+
+ d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
+retry:
+ result = i2400m_dev_bootstrap(i2400m, flags);
+ if (result < 0) {
+ dev_err(dev, "cannot bootstrap device: %d\n", result);
+ goto error_bootstrap;
+ }
+ result = i2400m_tx_setup(i2400m);
+ if (result < 0)
+ goto error_tx_setup;
+ result = i2400m->bus_dev_start(i2400m);
+ if (result < 0)
+ goto error_bus_dev_start;
+ i2400m->work_queue = create_singlethread_workqueue(wimax_dev->name);
+ if (i2400m->work_queue == NULL) {
+ result = -ENOMEM;
+ dev_err(dev, "cannot create workqueue\n");
+ goto error_create_workqueue;
+ }
+	/* At this point it is ok to send commands to the device */
+ result = i2400m_check_mac_addr(i2400m);
+ if (result < 0)
+ goto error_check_mac_addr;
+ i2400m->ready = 1;
+ wimax_state_change(wimax_dev, WIMAX_ST_UNINITIALIZED);
+ result = i2400m_dev_initialize(i2400m);
+ if (result < 0)
+ goto error_dev_initialize;
+ /* At this point, reports will come for the device and set it
+ * to the right state if it is different than UNINITIALIZED */
+ d_fnend(3, dev, "(net_dev %p [i2400m %p]) = %d\n",
+ net_dev, i2400m, result);
+ return result;
+
+error_dev_initialize:
+error_check_mac_addr:
+ destroy_workqueue(i2400m->work_queue);
+error_create_workqueue:
+ i2400m->bus_dev_stop(i2400m);
+error_bus_dev_start:
+ i2400m_tx_release(i2400m);
+error_tx_setup:
+error_bootstrap:
+ if (result == -ERESTARTSYS && times-- > 0) {
+ flags = I2400M_BRI_SOFT;
+ goto retry;
+ }
+ d_fnend(3, dev, "(net_dev %p [i2400m %p]) = %d\n",
+ net_dev, i2400m, result);
+ return result;
+}
+
+
+static
+int i2400m_dev_start(struct i2400m *i2400m, enum i2400m_bri bm_flags)
+{
+ int result;
+ mutex_lock(&i2400m->init_mutex); /* Well, start the device */
+ result = __i2400m_dev_start(i2400m, bm_flags);
+ if (result >= 0)
+ i2400m->updown = 1;
+ mutex_unlock(&i2400m->init_mutex);
+ return result;
+}
+
+
+/**
+ * __i2400m_dev_stop - Tear down driver communication with the device
+ *
+ * @i2400m: device descriptor
+ *
+ * Releases all the resources allocated to communicate with the device.
+ */
+static
+void __i2400m_dev_stop(struct i2400m *i2400m)
+{
+ struct wimax_dev *wimax_dev = &i2400m->wimax_dev;
+ struct device *dev = i2400m_dev(i2400m);
+
+ d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
+ wimax_state_change(wimax_dev, __WIMAX_ST_QUIESCING);
+ i2400m_dev_shutdown(i2400m);
+ i2400m->ready = 0;
+ destroy_workqueue(i2400m->work_queue);
+ i2400m->bus_dev_stop(i2400m);
+ i2400m_tx_release(i2400m);
+ wimax_state_change(wimax_dev, WIMAX_ST_DOWN);
+ d_fnend(3, dev, "(i2400m %p) = 0\n", i2400m);
+}
+
+
+/*
+ * Watch out -- we only need to stop if there is a need for it. The
+ * device could have reset itself and failed to come up again (see
+ * _i2400m_dev_reset_handle()).
+ */
+static
+void i2400m_dev_stop(struct i2400m *i2400m)
+{
+ mutex_lock(&i2400m->init_mutex);
+ if (i2400m->updown) {
+ __i2400m_dev_stop(i2400m);
+ i2400m->updown = 0;
+ }
+ mutex_unlock(&i2400m->init_mutex);
+}
+
+
+/*
+ * The device has rebooted; fix up the device and the driver
+ *
+ * Tear down the driver communication with the device, reload the
+ * firmware and reinitialize the communication with the device.
+ *
+ * If someone calls a reset when the device's firmware is down, in
+ * theory we won't see it because we are not listening. However, just
+ * in case, leave the code to handle it.
+ *
+ * If there is a reset context, use it; this means someone is waiting
+ * for us to tell him when the reset operation is complete and the
+ * device is ready to rock again.
+ *
+ * NOTE: if we are in the process of bringing up or down the
+ * communication with the device [running i2400m_dev_start() or
+ * _stop()], don't do anything, let it fail and handle it.
+ *
+ * This function always runs in a thread context.
+ */
+static
+void __i2400m_dev_reset_handle(struct work_struct *ws)
+{
+ int result;
+ struct i2400m_work *iw = container_of(ws, struct i2400m_work, ws);
+ struct i2400m *i2400m = iw->i2400m;
+ struct device *dev = i2400m_dev(i2400m);
+ enum wimax_st wimax_state;
+ struct i2400m_reset_ctx *ctx = i2400m->reset_ctx;
+
+ d_fnstart(3, dev, "(ws %p i2400m %p)\n", ws, i2400m);
+ result = 0;
+ if (mutex_trylock(&i2400m->init_mutex) == 0) {
+ /* We are still in i2400m_dev_start() [let it fail] or
+ * i2400m_dev_stop() [we are shutting down anyway, so
+ * ignore it] or we are resetting somewhere else. */
+ dev_err(dev, "device rebooted\n");
+ i2400m_msg_to_dev_cancel_wait(i2400m, -ERESTARTSYS);
+ complete(&i2400m->msg_completion);
+ goto out;
+ }
+ wimax_state = wimax_state_get(&i2400m->wimax_dev);
+ if (wimax_state < WIMAX_ST_UNINITIALIZED) {
+ dev_info(dev, "device rebooted: it is down, ignoring\n");
+ goto out_unlock; /* ifconfig up/down wasn't called */
+ }
+ dev_err(dev, "device rebooted: reinitializing driver\n");
+ __i2400m_dev_stop(i2400m);
+ i2400m->updown = 0;
+ result = __i2400m_dev_start(i2400m,
+ I2400M_BRI_SOFT | I2400M_BRI_MAC_REINIT);
+ if (result < 0) {
+ dev_err(dev, "device reboot: cannot start the device: %d\n",
+ result);
+ result = i2400m->bus_reset(i2400m, I2400M_RT_BUS);
+ if (result >= 0)
+ result = -ENODEV;
+ } else
+ i2400m->updown = 1;
+out_unlock:
+ if (i2400m->reset_ctx) {
+ ctx->result = result;
+ complete(&ctx->completion);
+ }
+ mutex_unlock(&i2400m->init_mutex);
+out:
+ i2400m_put(i2400m);
+ kfree(iw);
+ d_fnend(3, dev, "(ws %p i2400m %p) = void\n", ws, i2400m);
+ return;
+}
+
+
+/**
+ * i2400m_dev_reset_handle - Handle a device's reset in a thread context
+ *
+ * @i2400m: device descriptor
+ *
+ * Schedule a device reset handling out on a thread context, so it
+ * is safe to call from atomic context. We can't use the i2400m's
+ * queue as we are going to destroy it and reinitialize it as part of
+ * the driver bringup/bringdown process.
+ *
+ * See __i2400m_dev_reset_handle() for details; that takes care of
+ * reinitializing the driver to handle the reset, calling into the
+ * bus-specific functions ops as needed.
+ */
+int i2400m_dev_reset_handle(struct i2400m *i2400m)
+{
+ return i2400m_schedule_work(i2400m, __i2400m_dev_reset_handle,
+ GFP_ATOMIC);
+}
+EXPORT_SYMBOL_GPL(i2400m_dev_reset_handle);
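+
+/*
+ * Illustrative sketch of a call site (not part of this file): a bus
+ * driver's notification handler, upon recognizing a reboot barker
+ * sent by the device, would do something like:
+ *
+ *	if (!memcmp(buf, i2400m_NBOOT_BARKER, sizeof(i2400m_NBOOT_BARKER))
+ *	    || !memcmp(buf, i2400m_SBOOT_BARKER, sizeof(i2400m_SBOOT_BARKER)))
+ *		i2400m_dev_reset_handle(i2400m);
+ */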
+
+
+/**
+ * i2400m_setup - bus-generic setup function for the i2400m device
+ *
+ * @i2400m: device descriptor (bus-specific parts have been initialized)
+ * @bm_flags: boot mode flags (enum i2400m_bri) to pass to the boot
+ *     ROM initialization
+ *
+ * Returns: 0 if ok, < 0 errno code on error.
+ *
+ * Initializes the bus-generic parts of the i2400m driver; the
+ * bus-specific parts have been initialized, function pointers filled
+ * out by the bus-specific probe function.
+ *
+ * As well, this registers the WiMAX and net device nodes. Once this
+ * function returns, the device is operative and has to be ready to
+ * receive and send network traffic and WiMAX control operations.
+ */
+int i2400m_setup(struct i2400m *i2400m, enum i2400m_bri bm_flags)
+{
+ int result = -ENODEV;
+ struct device *dev = i2400m_dev(i2400m);
+ struct wimax_dev *wimax_dev = &i2400m->wimax_dev;
+ struct net_device *net_dev = i2400m->wimax_dev.net_dev;
+
+ d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
+
+ snprintf(wimax_dev->name, sizeof(wimax_dev->name),
+ "i2400m-%s:%s", dev->bus->name, dev->bus_id);
+
+ i2400m->bm_cmd_buf = kzalloc(I2400M_BM_CMD_BUF_SIZE, GFP_KERNEL);
+ if (i2400m->bm_cmd_buf == NULL) {
+ dev_err(dev, "cannot allocate USB command buffer\n");
+ goto error_bm_cmd_kzalloc;
+ }
+ i2400m->bm_ack_buf = kzalloc(I2400M_BM_ACK_BUF_SIZE, GFP_KERNEL);
+ if (i2400m->bm_ack_buf == NULL) {
+ dev_err(dev, "cannot allocate USB ack buffer\n");
+ goto error_bm_ack_buf_kzalloc;
+ }
+ result = i2400m_bootrom_init(i2400m, bm_flags);
+ if (result < 0) {
+ dev_err(dev, "read mac addr: bootrom init "
+ "failed: %d\n", result);
+ goto error_bootrom_init;
+ }
+ result = i2400m_read_mac_addr(i2400m);
+ if (result < 0)
+ goto error_read_mac_addr;
+
+ result = register_netdev(net_dev); /* Okey dokey, bring it up */
+ if (result < 0) {
+ dev_err(dev, "cannot register i2400m network device: %d\n",
+ result);
+ goto error_register_netdev;
+ }
+ netif_carrier_off(net_dev);
+
+ result = i2400m_dev_start(i2400m, bm_flags);
+ if (result < 0)
+ goto error_dev_start;
+
+ i2400m->wimax_dev.op_msg_from_user = i2400m_op_msg_from_user;
+ i2400m->wimax_dev.op_rfkill_sw_toggle = i2400m_op_rfkill_sw_toggle;
+ i2400m->wimax_dev.op_reset = i2400m_op_reset;
+ result = wimax_dev_add(&i2400m->wimax_dev, net_dev);
+ if (result < 0)
+ goto error_wimax_dev_add;
+ /* User space needs to do some init stuff */
+ wimax_state_change(wimax_dev, WIMAX_ST_UNINITIALIZED);
+
+ /* Now setup all that requires a registered net and wimax device. */
+ result = i2400m_debugfs_add(i2400m);
+ if (result < 0) {
+ dev_err(dev, "cannot setup i2400m's debugfs: %d\n", result);
+ goto error_debugfs_setup;
+ }
+ d_fnend(3, dev, "(i2400m %p) = %d\n", i2400m, result);
+ return result;
+
+error_debugfs_setup:
+ wimax_dev_rm(&i2400m->wimax_dev);
+error_wimax_dev_add:
+ i2400m_dev_stop(i2400m);
+error_dev_start:
+ unregister_netdev(net_dev);
+error_register_netdev:
+error_read_mac_addr:
+error_bootrom_init:
+ kfree(i2400m->bm_ack_buf);
+error_bm_ack_buf_kzalloc:
+ kfree(i2400m->bm_cmd_buf);
+error_bm_cmd_kzalloc:
+ d_fnend(3, dev, "(i2400m %p) = %d\n", i2400m, result);
+ return result;
+}
+EXPORT_SYMBOL_GPL(i2400m_setup);
+
+
+/**
+ * i2400m_release - release the bus-generic driver resources
+ *
+ * @i2400m: device descriptor
+ *
+ * Sends a disconnect message and undoes any setup done by i2400m_setup()
+ */
+void i2400m_release(struct i2400m *i2400m)
+{
+ struct device *dev = i2400m_dev(i2400m);
+
+ d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
+ netif_stop_queue(i2400m->wimax_dev.net_dev);
+
+ i2400m_debugfs_rm(i2400m);
+ wimax_dev_rm(&i2400m->wimax_dev);
+ i2400m_dev_stop(i2400m);
+ unregister_netdev(i2400m->wimax_dev.net_dev);
+ kfree(i2400m->bm_ack_buf);
+ kfree(i2400m->bm_cmd_buf);
+ d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
+}
+EXPORT_SYMBOL_GPL(i2400m_release);
+
+
+static
+int __init i2400m_driver_init(void)
+{
+ return 0;
+}
+module_init(i2400m_driver_init);
+
+static
+void __exit i2400m_driver_exit(void)
+{
+	/* flush work scheduled by i2400m_dev_reset_handle() */
+ flush_scheduled_work();
+ return;
+}
+module_exit(i2400m_driver_exit);
+
+MODULE_AUTHOR("Intel Corporation ");
+MODULE_DESCRIPTION("Intel 2400M WiMAX networking bus-generic driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/wimax/i2400m/fw.c b/drivers/net/wimax/i2400m/fw.c
new file mode 100644
index 00000000000..1d8271f34c3
--- /dev/null
+++ b/drivers/net/wimax/i2400m/fw.c
@@ -0,0 +1,1095 @@
+/*
+ * Intel Wireless WiMAX Connection 2400m
+ * Firmware uploader
+ *
+ *
+ * Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Intel Corporation
+ * Yanir Lubetkin
+ * Inaky Perez-Gonzalez
+ * - Initial implementation
+ *
+ *
+ * THE PROCEDURE
+ *
+ * (this is described for USB, but it is similar for SDIO)
+ *
+ * The 2400m works in two modes: boot-mode or normal mode. In boot
+ * mode we can execute only a handful of commands targeted at
+ * uploading the firmware and launching it.
+ *
+ * The 2400m enters boot mode when it is first connected to the
+ * system, when it crashes and when you ask it to reboot. There are
+ * two submodes of the boot mode: signed and non-signed. Signed takes
+ * firmwares signed with a certain private key, non-signed takes any
+ * firmware. Normal hardware takes only signed firmware.
+ *
+ * Upon entrance to boot mode, the device sends a few zero length
+ * packets (ZLPs) on the notification endpoint, then a reboot barker
+ * (4 le32 words with value I2400M_{S,N}BOOT_BARKER). We ack it by
+ * sending the same barker on the bulk out endpoint. The device acks
+ * with a reboot ack barker (4 le32 words with value 0xfeedbabe) and
+ * then the device is fully rebooted. At this point we can upload the
+ * firmware.
+ *
+ * This process is accomplished by the i2400m_bootrom_init()
+ * function. All the device interaction happens through the
+ * i2400m_bm_cmd() [boot mode command]. Special return values will
+ * indicate if the device resets.
+ *
+ * After this, we read the MAC address and then (if needed)
+ * reinitialize the device. We need to read it ahead of time because
+ * in the future, we might not upload the firmware until userspace
+ * 'ifconfig up's the device.
+ *
+ * We can then upload the firmware file. The file is composed of a BCF
+ * header (basic data, keys and signatures) and a list of write
+ * commands and payloads. We first upload the header
+ * [i2400m_dnload_init()] and then pass the commands and payloads
+ * verbatim to the i2400m_bm_cmd() function
+ * [i2400m_dnload_bcf()]. Then we tell the device to jump to the new
+ * firmware [i2400m_dnload_finalize()].
+ *
+ * Once firmware is uploaded, we are good to go :)
+ *
+ * When we don't know in which mode we are, we first try by sending a
+ * warm reset request that will take us to boot-mode. If we time out
+ * waiting for a reboot barker, that means maybe we are already in
+ * boot mode, so we send a reboot barker.
+ *
+ * COMMAND EXECUTION
+ *
+ * This code (and process) is single threaded; for executing commands,
+ * we post a URB to the notification endpoint, post the command, wait
+ * for data on the notification buffer. We don't need to worry about
+ * others as we know we are the only ones in there.
+ *
+ * BACKEND IMPLEMENTATION
+ *
+ * This code is bus-generic; the bus-specific driver provides back end
+ * implementations to send a boot mode command to the device and to
+ * read an acknowledgement (or an asynchronous notification) back
+ * from it.
+ *
+ * ROADMAP
+ *
+ * i2400m_dev_bootstrap Called by __i2400m_dev_start()
+ * request_firmware
+ * i2400m_fw_check
+ * i2400m_fw_dnload
+ * release_firmware
+ *
+ * i2400m_fw_dnload
+ * i2400m_bootrom_init
+ * i2400m_bm_cmd
+ * i2400m->bus_reset
+ * i2400m_dnload_init
+ * i2400m_dnload_init_signed
+ * i2400m_dnload_init_nonsigned
+ * i2400m_download_chunk
+ * i2400m_bm_cmd
+ * i2400m_dnload_bcf
+ * i2400m_bm_cmd
+ * i2400m_dnload_finalize
+ * i2400m_bm_cmd
+ *
+ * i2400m_bm_cmd
+ * i2400m->bus_bm_cmd_send()
+ * i2400m->bus_bm_wait_for_ack
+ * __i2400m_bm_ack_verify
+ *
+ * i2400m_bm_cmd_prepare Used by bus-drivers to prep
+ * commands before sending
+ */
+#include <linux/firmware.h>
+#include <linux/sched.h>
+#include <linux/usb.h>
+#include "i2400m.h"
+
+
+#define D_SUBMODULE fw
+#include "debug-levels.h"
+
+
+static const __le32 i2400m_ACK_BARKER[4] = {
+ __constant_cpu_to_le32(I2400M_ACK_BARKER),
+ __constant_cpu_to_le32(I2400M_ACK_BARKER),
+ __constant_cpu_to_le32(I2400M_ACK_BARKER),
+ __constant_cpu_to_le32(I2400M_ACK_BARKER)
+};
+
+
+/**
+ * i2400m_bm_cmd_prepare - prepare a boot-mode command for delivery
+ *
+ * @cmd: pointer to bootrom header to prepare
+ *
+ * Computes checksum if so needed. After calling this function, DO NOT
+ * modify the command or header as the checksum won't work anymore.
+ *
+ * We do it from here because sometimes we cannot do it in the
+ * original context where the command was built (it is const), so when
+ * we copy it to our staging buffer, we add the checksum there.
+ */
+void i2400m_bm_cmd_prepare(struct i2400m_bootrom_header *cmd)
+{
+ if (i2400m_brh_get_use_checksum(cmd)) {
+ int i;
+ u32 checksum = 0;
+ const u32 *checksum_ptr = (void *) cmd->payload;
+ for (i = 0; i < cmd->data_size / 4; i++)
+ checksum += cpu_to_le32(*checksum_ptr++);
+ checksum += cmd->command + cmd->target_addr + cmd->data_size;
+ cmd->block_checksum = cpu_to_le32(checksum);
+ }
+}
+EXPORT_SYMBOL_GPL(i2400m_bm_cmd_prepare);
+
+
+/*
+ * Verify the ack data received
+ *
+ * Given a reply to a boot mode command, chew it and verify everything
+ * is ok.
+ *
+ * @opcode: opcode which generated this ack. For error messages.
+ * @ack: pointer to ack data we received
+ * @ack_size: size of that data buffer
+ * @flags: I2400M_BM_CMD_* flags we called the command with.
+ *
+ * Way too long function -- maybe it should be further split
+ */
+static
+ssize_t __i2400m_bm_ack_verify(struct i2400m *i2400m, int opcode,
+ struct i2400m_bootrom_header *ack,
+ size_t ack_size, int flags)
+{
+ ssize_t result = -ENOMEM;
+ struct device *dev = i2400m_dev(i2400m);
+
+ d_fnstart(8, dev, "(i2400m %p opcode %d ack %p size %zu)\n",
+ i2400m, opcode, ack, ack_size);
+ if (ack_size < sizeof(*ack)) {
+ result = -EIO;
+ dev_err(dev, "boot-mode cmd %d: HW BUG? notification didn't "
+ "return enough data (%zu bytes vs %zu expected)\n",
+ opcode, ack_size, sizeof(*ack));
+ goto error_ack_short;
+ }
+ if (ack_size == sizeof(i2400m_NBOOT_BARKER)
+ && memcmp(ack, i2400m_NBOOT_BARKER, sizeof(*ack)) == 0) {
+ result = -ERESTARTSYS;
+ i2400m->sboot = 0;
+ d_printf(6, dev, "boot-mode cmd %d: "
+ "HW non-signed boot barker\n", opcode);
+ goto error_reboot;
+ }
+ if (ack_size == sizeof(i2400m_SBOOT_BARKER)
+ && memcmp(ack, i2400m_SBOOT_BARKER, sizeof(*ack)) == 0) {
+ result = -ERESTARTSYS;
+ i2400m->sboot = 1;
+ d_printf(6, dev, "boot-mode cmd %d: HW signed reboot barker\n",
+ opcode);
+ goto error_reboot;
+ }
+ if (ack_size == sizeof(i2400m_ACK_BARKER)
+ && memcmp(ack, i2400m_ACK_BARKER, sizeof(*ack)) == 0) {
+ result = -EISCONN;
+ d_printf(3, dev, "boot-mode cmd %d: HW reboot ack barker\n",
+ opcode);
+ goto error_reboot_ack;
+ }
+ result = 0;
+ if (flags & I2400M_BM_CMD_RAW)
+ goto out_raw;
+ ack->data_size = le32_to_cpu(ack->data_size);
+ ack->target_addr = le32_to_cpu(ack->target_addr);
+ ack->block_checksum = le32_to_cpu(ack->block_checksum);
+ d_printf(5, dev, "boot-mode cmd %d: notification for opcode %u "
+ "response %u csum %u rr %u da %u\n",
+ opcode, i2400m_brh_get_opcode(ack),
+ i2400m_brh_get_response(ack),
+ i2400m_brh_get_use_checksum(ack),
+ i2400m_brh_get_response_required(ack),
+ i2400m_brh_get_direct_access(ack));
+ result = -EIO;
+ if (i2400m_brh_get_signature(ack) != 0xcbbc) {
+ dev_err(dev, "boot-mode cmd %d: HW BUG? wrong signature "
+ "0x%04x\n", opcode, i2400m_brh_get_signature(ack));
+ goto error_ack_signature;
+ }
+ if (opcode != -1 && opcode != i2400m_brh_get_opcode(ack)) {
+ dev_err(dev, "boot-mode cmd %d: HW BUG? "
+ "received response for opcode %u, expected %u\n",
+ opcode, i2400m_brh_get_opcode(ack), opcode);
+ goto error_ack_opcode;
+ }
+ if (i2400m_brh_get_response(ack) != 0) { /* failed? */
+ dev_err(dev, "boot-mode cmd %d: error; hw response %u\n",
+ opcode, i2400m_brh_get_response(ack));
+ goto error_ack_failed;
+ }
+ if (ack_size < ack->data_size + sizeof(*ack)) {
+ dev_err(dev, "boot-mode cmd %d: SW BUG "
+ "driver provided only %zu bytes for %zu bytes "
+ "of data\n", opcode, ack_size,
+ (size_t) le32_to_cpu(ack->data_size) + sizeof(*ack));
+ goto error_ack_short_buffer;
+ }
+ result = ack_size;
+ /* Don't you love this stack of empty targets? Well, I don't
+ * either, but it helps track exactly who comes in here and
+ * why :) */
+error_ack_short_buffer:
+error_ack_failed:
+error_ack_opcode:
+error_ack_signature:
+out_raw:
+error_reboot_ack:
+error_reboot:
+error_ack_short:
+ d_fnend(8, dev, "(i2400m %p opcode %d ack %p size %zu) = %d\n",
+ i2400m, opcode, ack, ack_size, (int) result);
+ return result;
+}
+
+
+/**
+ * i2400m_bm_cmd - Execute a boot mode command
+ *
+ * @cmd: buffer containing the command data (pointing at the header).
+ * This data can be ANYWHERE (for USB, we will copy it to a
+ * specific buffer). Make sure everything is in proper little
+ * endian.
+ *
+ * A raw buffer can be also sent, just cast it and set flags to
+ * I2400M_BM_CMD_RAW.
+ *
+ * This function will generate a checksum for you if the
+ * checksum bit in the command is set (unless I2400M_BM_CMD_RAW
+ * is set).
+ *
+ * You can use the i2400m->bm_cmd_buf to stage your commands and
+ * send them.
+ *
+ * If NULL, no command is sent (we just wait for an ack).
+ *
+ * @cmd_size: size of the command. Will be auto padded to the
+ * bus-specific driver's padding requirements.
+ *
+ * @ack: buffer in which to place the acknowledgement. If it is a regular
+ * command response, all fields will be returned in the right,
+ * native endianness.
+ *
+ * You *cannot* use i2400m->bm_ack_buf for this buffer.
+ *
+ * @ack_size: size of @ack, 16-byte aligned; you need to provide at
+ * least sizeof(*ack) bytes plus enough to contain the return data
+ * from the command.
+ *
+ * @flags: see I2400M_BM_CMD_* above.
+ *
+ * @returns: bytes received by the notification; if < 0, an errno code
+ * denoting an error or:
+ *
+ * -ERESTARTSYS The device has rebooted
+ *
+ * Executes a boot-mode command and waits for a response, doing basic
+ * validation on it; if a zero length response is received, it retries
+ * waiting for a response until a non-zero one is received (timing out
+ * after %I2400M_BOOT_RETRIES retries).
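+ *
+ * A minimal usage sketch, mirroring the MAC address read done by
+ * i2400m_read_mac_addr() below (all names come from this file):
+ *
+ *   cmd = i2400m->bm_cmd_buf;
+ *   cmd->command = i2400m_brh_command(I2400M_BRH_READ, 0, 1);
+ *   cmd->target_addr = cpu_to_le32(0x00203fe8);
+ *   cmd->data_size = cpu_to_le32(6);
+ *   result = i2400m_bm_cmd(i2400m, cmd, sizeof(*cmd),
+ *                          &ack_buf.ack, sizeof(ack_buf), 0);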
+ */
+static
+ssize_t i2400m_bm_cmd(struct i2400m *i2400m,
+ const struct i2400m_bootrom_header *cmd, size_t cmd_size,
+ struct i2400m_bootrom_header *ack, size_t ack_size,
+ int flags)
+{
+ ssize_t result = -ENOMEM, rx_bytes;
+ struct device *dev = i2400m_dev(i2400m);
+ int opcode = cmd == NULL ? -1 : i2400m_brh_get_opcode(cmd);
+
+ d_fnstart(6, dev, "(i2400m %p cmd %p size %zu ack %p size %zu)\n",
+ i2400m, cmd, cmd_size, ack, ack_size);
+ BUG_ON(ack_size < sizeof(*ack));
+ BUG_ON(i2400m->boot_mode == 0);
+
+ if (cmd != NULL) { /* send the command */
+ memcpy(i2400m->bm_cmd_buf, cmd, cmd_size);
+ result = i2400m->bus_bm_cmd_send(i2400m, cmd, cmd_size, flags);
+ if (result < 0)
+ goto error_cmd_send;
+ if ((flags & I2400M_BM_CMD_RAW) == 0)
+ d_printf(5, dev,
+ "boot-mode cmd %d csum %u rr %u da %u: "
+ "addr 0x%04x size %u block csum 0x%04x\n",
+ opcode, i2400m_brh_get_use_checksum(cmd),
+ i2400m_brh_get_response_required(cmd),
+ i2400m_brh_get_direct_access(cmd),
+ cmd->target_addr, cmd->data_size,
+ cmd->block_checksum);
+ }
+ result = i2400m->bus_bm_wait_for_ack(i2400m, ack, ack_size);
+ if (result < 0) {
+ dev_err(dev, "boot-mode cmd %d: error waiting for an ack: %d\n",
+ opcode, (int) result); /* bah, %zd doesn't work */
+ goto error_wait_for_ack;
+ }
+ rx_bytes = result;
+ /* verify the ack and read more if necessary [result is the
+ * final amount of bytes we get in the ack] */
+ result = __i2400m_bm_ack_verify(i2400m, opcode, ack, ack_size, flags);
+ if (result < 0)
+ goto error_bad_ack;
+ /* Don't you love this stack of empty targets? Well, I don't
+ * either, but it helps track exactly who comes in here and
+ * why :) */
+ result = rx_bytes;
+error_bad_ack:
+error_wait_for_ack:
+error_cmd_send:
+ d_fnend(6, dev, "(i2400m %p cmd %p size %zu ack %p size %zu) = %d\n",
+ i2400m, cmd, cmd_size, ack, ack_size, (int) result);
+ return result;
+}
+
+
+/**
+ * i2400m_download_chunk - write a single chunk of data to the device's memory
+ *
+ * @i2400m: device descriptor
+ * @chunk: the buffer to write
+ * @__chunk_len: length of the buffer to write
+ * @addr: address in the device memory space
+ * @direct: bootrom write mode
+ * @do_csum: should a checksum validation be performed
+ */
+static int i2400m_download_chunk(struct i2400m *i2400m, const void *chunk,
+ size_t __chunk_len, unsigned long addr,
+ unsigned int direct, unsigned int do_csum)
+{
+ int ret;
+ size_t chunk_len = ALIGN(__chunk_len, I2400M_PL_PAD);
+ struct device *dev = i2400m_dev(i2400m);
+ struct {
+ struct i2400m_bootrom_header cmd;
+ u8 cmd_payload[chunk_len];
+ } __attribute__((packed)) *buf;
+ struct i2400m_bootrom_header ack;
+
+ d_fnstart(5, dev, "(i2400m %p chunk %p __chunk_len %zu addr 0x%08lx "
+ "direct %u do_csum %u)\n", i2400m, chunk, __chunk_len,
+ addr, direct, do_csum);
+ buf = i2400m->bm_cmd_buf;
+ memcpy(buf->cmd_payload, chunk, __chunk_len);
+ memset(buf->cmd_payload + __chunk_len, 0xad, chunk_len - __chunk_len);
+
+ buf->cmd.command = i2400m_brh_command(I2400M_BRH_WRITE,
+ __chunk_len & 0x3 ? 0 : do_csum,
+ __chunk_len & 0xf ? 0 : direct);
+ buf->cmd.target_addr = cpu_to_le32(addr);
+ buf->cmd.data_size = cpu_to_le32(__chunk_len);
+ ret = i2400m_bm_cmd(i2400m, &buf->cmd, sizeof(buf->cmd) + chunk_len,
+ &ack, sizeof(ack), 0);
+ if (ret >= 0)
+ ret = 0;
+ d_fnend(5, dev, "(i2400m %p chunk %p __chunk_len %zu addr 0x%08lx "
+ "direct %u do_csum %u) = %d\n", i2400m, chunk, __chunk_len,
+ addr, direct, do_csum, ret);
+ return ret;
+}
+
+
+/*
+ * Download a BCF file's sections to the device
+ *
+ * @i2400m: device descriptor
+ * @bcf: pointer to firmware data (followed by the payloads). Assumed
+ * verified and consistent.
+ * @bcf_len: length (in bytes) of the @bcf buffer.
+ *
+ * Returns: < 0 errno code on error or the offset to the jump instruction.
+ *
+ * Given a BCF file, downloads each section (a command and a payload)
+ * to the device's address space. Actually, it just executes each
+ * command in the BCF file.
+ *
+ * The section size has to be aligned to 4 bytes AND the padding has
+ * to be taken from the firmware file, as the signature takes it into
+ * account.
+ */
+static
+ssize_t i2400m_dnload_bcf(struct i2400m *i2400m,
+ const struct i2400m_bcf_hdr *bcf, size_t bcf_len)
+{
+ ssize_t ret;
+ struct device *dev = i2400m_dev(i2400m);
+ size_t offset, /* iterator offset */
+ data_size, /* Size of the data payload */
+ section_size, /* Size of the whole section (cmd + payload) */
+ section = 1;
+ const struct i2400m_bootrom_header *bh;
+ struct i2400m_bootrom_header ack;
+
+ d_fnstart(3, dev, "(i2400m %p bcf %p bcf_len %zu)\n",
+ i2400m, bcf, bcf_len);
+ /* Iterate over the command blocks in the BCF file that start
+ * after the header */
+ offset = le32_to_cpu(bcf->header_len) * sizeof(u32);
+ while (1) { /* start sending the file */
+ bh = (void *) bcf + offset;
+ data_size = le32_to_cpu(bh->data_size);
+ section_size = ALIGN(sizeof(*bh) + data_size, 4);
+ d_printf(7, dev,
+ "downloading section #%zu (@%zu %zu B) to 0x%08x\n",
+ section, offset, sizeof(*bh) + data_size,
+ le32_to_cpu(bh->target_addr));
+ if (i2400m_brh_get_opcode(bh) == I2400M_BRH_SIGNED_JUMP) {
+ /* Secure boot needs to stop here */
+ d_printf(5, dev, "signed jump found @%zu\n", offset);
+ break;
+ }
+ if (offset + section_size == bcf_len)
+ /* Non-secure boot stops here */
+ break;
+ if (offset + section_size > bcf_len) {
+ dev_err(dev, "fw %s: bad section #%zu, "
+ "end (@%zu) beyond EOF (@%zu)\n",
+ i2400m->bus_fw_name, section,
+ offset + section_size, bcf_len);
+ ret = -EINVAL;
+ goto error_section_beyond_eof;
+ }
+ __i2400m_msleep(20);
+ ret = i2400m_bm_cmd(i2400m, bh, section_size,
+ &ack, sizeof(ack), I2400M_BM_CMD_RAW);
+ if (ret < 0) {
+ dev_err(dev, "fw %s: section #%zu (@%zu %zu B) "
+ "failed %d\n", i2400m->bus_fw_name, section,
+ offset, sizeof(*bh) + data_size, (int) ret);
+ goto error_send;
+ }
+ offset += section_size;
+ section++;
+ }
+ ret = offset;
+error_section_beyond_eof:
+error_send:
+ d_fnend(3, dev, "(i2400m %p bcf %p bcf_len %zu) = %d\n",
+ i2400m, bcf, bcf_len, (int) ret);
+ return ret;
+}
+
+
+/*
+ * Do the final steps of uploading firmware
+ *
+ * Depending on the boot mode (signed vs non-signed), different
+ * actions need to be taken.
+ */
+static
+int i2400m_dnload_finalize(struct i2400m *i2400m,
+ const struct i2400m_bcf_hdr *bcf, size_t offset)
+{
+ int ret = 0;
+ struct device *dev = i2400m_dev(i2400m);
+ struct i2400m_bootrom_header *cmd, ack;
+ struct {
+ struct i2400m_bootrom_header cmd;
+ u8 cmd_pl[0];
+ } __attribute__((packed)) *cmd_buf;
+ size_t signature_block_offset, signature_block_size;
+
+ d_fnstart(3, dev, "offset %zu\n", offset);
+ cmd = (void *) bcf + offset;
+ if (i2400m->sboot == 0) {
+ struct i2400m_bootrom_header jump_ack;
+ d_printf(3, dev, "unsecure boot, jumping to 0x%08x\n",
+ le32_to_cpu(cmd->target_addr));
+ i2400m_brh_set_opcode(cmd, I2400M_BRH_JUMP);
+ cmd->data_size = 0;
+ ret = i2400m_bm_cmd(i2400m, cmd, sizeof(*cmd),
+ &jump_ack, sizeof(jump_ack), 0);
+ } else {
+ d_printf(3, dev, "secure boot, jumping to 0x%08x\n",
+ le32_to_cpu(cmd->target_addr));
+ cmd_buf = i2400m->bm_cmd_buf;
+ memcpy(&cmd_buf->cmd, cmd, sizeof(*cmd));
+ signature_block_offset =
+ sizeof(*bcf)
+ + le32_to_cpu(bcf->key_size) * sizeof(u32)
+ + le32_to_cpu(bcf->exponent_size) * sizeof(u32);
+ signature_block_size =
+ le32_to_cpu(bcf->modulus_size) * sizeof(u32);
+ memcpy(cmd_buf->cmd_pl, (void *) bcf + signature_block_offset,
+ signature_block_size);
+ ret = i2400m_bm_cmd(i2400m, &cmd_buf->cmd,
+ sizeof(cmd_buf->cmd) + signature_block_size,
+ &ack, sizeof(ack), I2400M_BM_CMD_RAW);
+ }
+ d_fnend(3, dev, "returning %d\n", ret);
+ return ret;
+}
+
+
+/**
+ * i2400m_bootrom_init - Reboots a powered device into boot mode
+ *
+ * @i2400m: device descriptor
+ * @flags:
+ * I2400M_BRI_SOFT: a reboot notification has been seen
+ * already, so don't wait for it.
+ *
+ * I2400M_BRI_NO_REBOOT: Don't send a reboot command, but wait
+ * for a reboot barker notification. This is a one shot; if
+ * the state machine needs to send a reboot command it will.
+ *
+ * Returns:
+ *
+ * < 0 errno code on error, 0 if ok.
+ *
+ * i2400m->sboot set to 0 for unsecure boot process, 1 for secure
+ * boot process.
+ *
+ * Description:
+ *
+ * Tries hard enough to put the device in boot-mode. There are two
+ * main phases to this:
+ *
+ * a. (1) send a reboot command and (2) get a reboot barker
+ * b. (1) ack the reboot sending a reboot barker and (2) getting an
+ * ack barker in return
+ *
+ * We want to skip (a) in some cases [soft]. The state machine is
+ * horrible, but it is basically: on each phase, send what has to be
+ * sent (if any), wait for the answer and act on the answer. We might
+ * have to backtrack and retry, so we keep a max tries counter for
+ * that.
+ *
+ * If we get a timeout after sending a warm reset, we do it again.
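+ *
+ * A rough sketch of the state machine implemented below (purely
+ * informational; the labels match the goto targets in the code):
+ *
+ *   do_reboot:     send warm reset (unless I2400M_BRI_NO_REBOOT), wait
+ *                    got reboot barker -> go on to do_reboot_ack
+ *                    got ack barker    -> back to do_reboot
+ *                    timed out         -> assume boot mode, do_reboot_ack
+ *   do_reboot_ack: send reboot barker (NBOOT or SBOOT), wait
+ *                    got ack barker    -> done
+ *                    got reboot barker -> retry do_reboot_ack
+ *                    timed out         -> flip i2400m->sboot and retry,
+ *                                         then fall back to do_reboot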
+ */
+int i2400m_bootrom_init(struct i2400m *i2400m, enum i2400m_bri flags)
+{
+ int result;
+ struct device *dev = i2400m_dev(i2400m);
+ struct i2400m_bootrom_header *cmd;
+ struct i2400m_bootrom_header ack;
+ int count = I2400M_BOOT_RETRIES;
+ int ack_timeout_cnt = 1;
+
+ BUILD_BUG_ON(sizeof(*cmd) != sizeof(i2400m_NBOOT_BARKER));
+ BUILD_BUG_ON(sizeof(ack) != sizeof(i2400m_ACK_BARKER));
+
+ d_fnstart(4, dev, "(i2400m %p flags 0x%08x)\n", i2400m, flags);
+ result = -ENOMEM;
+ cmd = i2400m->bm_cmd_buf;
+ if (flags & I2400M_BRI_SOFT)
+ goto do_reboot_ack;
+do_reboot:
+ if (--count < 0)
+ goto error_timeout;
+ d_printf(4, dev, "device reboot: reboot command [%d # left]\n",
+ count);
+ if ((flags & I2400M_BRI_NO_REBOOT) == 0)
+ i2400m->bus_reset(i2400m, I2400M_RT_WARM);
+ result = i2400m_bm_cmd(i2400m, NULL, 0, &ack, sizeof(ack),
+ I2400M_BM_CMD_RAW);
+ flags &= ~I2400M_BRI_NO_REBOOT;
+ switch (result) {
+ case -ERESTARTSYS:
+ d_printf(4, dev, "device reboot: got reboot barker\n");
+ break;
+ case -EISCONN: /* we don't know how it got here...but we follow it */
+ d_printf(4, dev, "device reboot: got ack barker - whatever\n");
+ goto do_reboot;
+ case -ETIMEDOUT: /* device has timed out, we might be in boot
+ * mode already and expecting an ack, let's try
+ * that */
+ dev_info(dev, "warm reset timed out, trying an ack\n");
+ goto do_reboot_ack;
+ case -EPROTO:
+ case -ESHUTDOWN: /* dev is gone */
+ case -EINTR: /* user cancelled */
+ goto error_dev_gone;
+ default:
+ dev_err(dev, "device reboot: error %d while waiting "
+ "for reboot barker - rebooting\n", result);
+ goto do_reboot;
+ }
+ /* At this point we ack back with 4 REBOOT barkers and expect
+ * 4 ACK barkers. This is ugly, as we send a raw command --
+ * hence the cast. _bm_cmd() will catch the reboot ack
+ * notification and report it as -EISCONN. */
+do_reboot_ack:
+ d_printf(4, dev, "device reboot ack: sending ack [%d # left]\n", count);
+ if (i2400m->sboot == 0)
+ memcpy(cmd, i2400m_NBOOT_BARKER,
+ sizeof(i2400m_NBOOT_BARKER));
+ else
+ memcpy(cmd, i2400m_SBOOT_BARKER,
+ sizeof(i2400m_SBOOT_BARKER));
+ result = i2400m_bm_cmd(i2400m, cmd, sizeof(*cmd),
+ &ack, sizeof(ack), I2400M_BM_CMD_RAW);
+ switch (result) {
+ case -ERESTARTSYS:
+ d_printf(4, dev, "reboot ack: got reboot barker - retrying\n");
+ if (--count < 0)
+ goto error_timeout;
+ goto do_reboot_ack;
+ case -EISCONN:
+ d_printf(4, dev, "reboot ack: got ack barker - good\n");
+ break;
+ case -ETIMEDOUT: /* no response, maybe it is the other type? */
+ if (ack_timeout_cnt-- >= 0) {
+ d_printf(4, dev, "reboot ack timedout: "
+ "trying the other type?\n");
+ i2400m->sboot = !i2400m->sboot;
+ goto do_reboot_ack;
+ } else {
+ dev_err(dev, "reboot ack timedout too long: "
+ "trying reboot\n");
+ goto do_reboot;
+ }
+ break;
+ case -EPROTO:
+ case -ESHUTDOWN: /* dev is gone */
+ goto error_dev_gone;
+ default:
+ dev_err(dev, "device reboot ack: error %d while waiting for "
+ "reboot ack barker - rebooting\n", result);
+ goto do_reboot;
+ }
+ d_printf(2, dev, "device reboot ack: got ack barker - boot done\n");
+ result = 0;
+exit_timeout:
+error_dev_gone:
+ d_fnend(4, dev, "(i2400m %p flags 0x%08x) = %d\n",
+ i2400m, flags, result);
+ return result;
+
+error_timeout:
+ dev_err(dev, "Timed out waiting for reboot ack, resetting\n");
+ i2400m->bus_reset(i2400m, I2400M_RT_BUS);
+ result = -ETIMEDOUT;
+ goto exit_timeout;
+}
+
+
+/*
+ * Read the MAC addr
+ *
+ * The position this function reads is fixed in device memory and
+ * always available, even without firmware.
+ *
+ * Note we specify we want to read only six bytes, but provide space
+ * for 16, as we always get it rounded up.
+ */
+int i2400m_read_mac_addr(struct i2400m *i2400m)
+{
+ int result;
+ struct device *dev = i2400m_dev(i2400m);
+ struct net_device *net_dev = i2400m->wimax_dev.net_dev;
+ struct i2400m_bootrom_header *cmd;
+ struct {
+ struct i2400m_bootrom_header ack;
+ u8 ack_pl[16];
+ } __attribute__((packed)) ack_buf;
+
+ d_fnstart(5, dev, "(i2400m %p)\n", i2400m);
+ cmd = i2400m->bm_cmd_buf;
+ cmd->command = i2400m_brh_command(I2400M_BRH_READ, 0, 1);
+ cmd->target_addr = cpu_to_le32(0x00203fe8);
+ cmd->data_size = cpu_to_le32(6);
+ result = i2400m_bm_cmd(i2400m, cmd, sizeof(*cmd),
+ &ack_buf.ack, sizeof(ack_buf), 0);
+ if (result < 0) {
+ dev_err(dev, "BM: read mac addr failed: %d\n", result);
+ goto error_read_mac;
+ }
+ d_printf(2, dev,
+ "mac addr is %02x:%02x:%02x:%02x:%02x:%02x\n",
+ ack_buf.ack_pl[0], ack_buf.ack_pl[1],
+ ack_buf.ack_pl[2], ack_buf.ack_pl[3],
+ ack_buf.ack_pl[4], ack_buf.ack_pl[5]);
+ if (i2400m->bus_bm_mac_addr_impaired == 1) {
+ ack_buf.ack_pl[0] = 0x00;
+ ack_buf.ack_pl[1] = 0x16;
+ ack_buf.ack_pl[2] = 0xd3;
+ get_random_bytes(&ack_buf.ack_pl[3], 3);
+ dev_err(dev, "BM is MAC addr impaired, faking MAC addr to "
+ "mac addr is %02x:%02x:%02x:%02x:%02x:%02x\n",
+ ack_buf.ack_pl[0], ack_buf.ack_pl[1],
+ ack_buf.ack_pl[2], ack_buf.ack_pl[3],
+ ack_buf.ack_pl[4], ack_buf.ack_pl[5]);
+ result = 0;
+ }
+ net_dev->addr_len = ETH_ALEN;
+ memcpy(net_dev->perm_addr, ack_buf.ack_pl, ETH_ALEN);
+ memcpy(net_dev->dev_addr, ack_buf.ack_pl, ETH_ALEN);
+error_read_mac:
+ d_fnend(5, dev, "(i2400m %p) = %d\n", i2400m, result);
+ return result;
+}
+
+
+/*
+ * Initialize a non signed boot
+ *
+ * This implies sending some magic values to the device's memory. Note
+ * we convert the values to little endian in the same array
+ * declaration.
+ */
+static
+int i2400m_dnload_init_nonsigned(struct i2400m *i2400m)
+{
+#define POKE(a, d) { \
+ .address = __constant_cpu_to_le32(a), \
+ .data = __constant_cpu_to_le32(d) \
+}
+ static const struct {
+ __le32 address;
+ __le32 data;
+ } i2400m_pokes[] = {
+ POKE(0x081A58, 0xA7810230),
+ POKE(0x080040, 0x00000000),
+ POKE(0x080048, 0x00000082),
+ POKE(0x08004C, 0x0000081F),
+ POKE(0x080054, 0x00000085),
+ POKE(0x080058, 0x00000180),
+ POKE(0x08005C, 0x00000018),
+ POKE(0x080060, 0x00000010),
+ POKE(0x080574, 0x00000001),
+ POKE(0x080550, 0x00000005),
+ POKE(0xAE0000, 0x00000000),
+ };
+#undef POKE
+ unsigned i;
+ int ret;
+ struct device *dev = i2400m_dev(i2400m);
+
+ dev_warn(dev, "WARNING!!! non-signed boot UNTESTED PATH!\n");
+
+ d_fnstart(5, dev, "(i2400m %p)\n", i2400m);
+ for (i = 0; i < ARRAY_SIZE(i2400m_pokes); i++) {
+ ret = i2400m_download_chunk(i2400m, &i2400m_pokes[i].data,
+ sizeof(i2400m_pokes[i].data),
+ i2400m_pokes[i].address, 1, 1);
+ if (ret < 0)
+ break;
+ }
+ d_fnend(5, dev, "(i2400m %p) = %d\n", i2400m, ret);
+ return ret;
+}
+
+
+/*
+ * Initialize the signed boot process
+ *
+ * @i2400m: device descriptor
+ *
+ * @bcf_hdr: pointer to the firmware header; assumes it is fully in
+ * memory (it has gone through basic validation).
+ *
+ * Returns: 0 if ok, < 0 errno code on error, -ERESTARTSYS if the hw
+ * rebooted.
+ *
+ * This writes the firmware BCF header to the device using the
+ * HASH_PAYLOAD_ONLY command.
+ */
+static
+int i2400m_dnload_init_signed(struct i2400m *i2400m,
+ const struct i2400m_bcf_hdr *bcf_hdr)
+{
+ int ret;
+ struct device *dev = i2400m_dev(i2400m);
+ struct {
+ struct i2400m_bootrom_header cmd;
+ struct i2400m_bcf_hdr cmd_pl;
+ } __attribute__((packed)) *cmd_buf;
+ struct i2400m_bootrom_header ack;
+
+ d_fnstart(5, dev, "(i2400m %p bcf_hdr %p)\n", i2400m, bcf_hdr);
+ cmd_buf = i2400m->bm_cmd_buf;
+ cmd_buf->cmd.command =
+ i2400m_brh_command(I2400M_BRH_HASH_PAYLOAD_ONLY, 0, 0);
+ cmd_buf->cmd.target_addr = 0;
+ cmd_buf->cmd.data_size = cpu_to_le32(sizeof(cmd_buf->cmd_pl));
+ memcpy(&cmd_buf->cmd_pl, bcf_hdr, sizeof(*bcf_hdr));
+ ret = i2400m_bm_cmd(i2400m, &cmd_buf->cmd, sizeof(*cmd_buf),
+ &ack, sizeof(ack), 0);
+ if (ret >= 0)
+ ret = 0;
+ d_fnend(5, dev, "(i2400m %p bcf_hdr %p) = %d\n", i2400m, bcf_hdr, ret);
+ return ret;
+}
+
+
+/*
+ * Initialize the firmware download on the device side
+ *
+ * Multiplex to the one that matters based on the device's mode
+ * (signed or non-signed).
+ */
+static
+int i2400m_dnload_init(struct i2400m *i2400m, const struct i2400m_bcf_hdr *bcf)
+{
+ int result;
+ struct device *dev = i2400m_dev(i2400m);
+ u32 module_id = le32_to_cpu(bcf->module_id);
+
+ if (i2400m->sboot == 0
+ && (module_id & I2400M_BCF_MOD_ID_POKES) == 0) {
+ /* non-signed boot process without pokes */
+ result = i2400m_dnload_init_nonsigned(i2400m);
+ if (result == -ERESTARTSYS)
+ return result;
+ if (result < 0)
+ dev_err(dev, "fw %s: non-signed download "
+ "initialization failed: %d\n",
+ i2400m->bus_fw_name, result);
+ } else if (i2400m->sboot == 0
+ && (module_id & I2400M_BCF_MOD_ID_POKES)) {
+ /* non-signed boot process with pokes, nothing to do */
+ result = 0;
+ } else { /* signed boot process */
+ result = i2400m_dnload_init_signed(i2400m, bcf);
+ if (result == -ERESTARTSYS)
+ return result;
+ if (result < 0)
+ dev_err(dev, "fw %s: signed boot download "
+ "initialization failed: %d\n",
+ i2400m->bus_fw_name, result);
+ }
+ return result;
+}
+
+
+/*
+ * Run quick consistency tests on the firmware file
+ *
+ * Check that the firmware was built for the i2400m device,
+ * etc. These checks are mostly informative, as the device will make
+ * them too; but the driver's error messages are more informative about
+ * what went wrong.
+ */
+static
+int i2400m_fw_check(struct i2400m *i2400m,
+ const struct i2400m_bcf_hdr *bcf,
+ size_t bcf_size)
+{
+ int result;
+ struct device *dev = i2400m_dev(i2400m);
+ unsigned module_type, header_len, major_version, minor_version,
+ module_id, module_vendor, date, size;
+
+ /* Check hard errors */
+ result = -EINVAL;
+ if (bcf_size < sizeof(*bcf)) { /* big enough header? */
+ dev_err(dev, "firmware %s too short: "
+ "%zu B vs %zu (at least) expected\n",
+ i2400m->bus_fw_name, bcf_size, sizeof(*bcf));
+ goto error;
+ }
+
+ module_type = bcf->module_type;
+ header_len = sizeof(u32) * le32_to_cpu(bcf->header_len);
+ major_version = (le32_to_cpu(bcf->header_version) & 0xffff0000) >> 16;
+ minor_version = le32_to_cpu(bcf->header_version) & 0x0000ffff;
+ module_id = le32_to_cpu(bcf->module_id);
+ module_vendor = le32_to_cpu(bcf->module_vendor);
+ date = le32_to_cpu(bcf->date);
+ size = sizeof(u32) * le32_to_cpu(bcf->size);
+
+ if (bcf_size != size) { /* annoyingly paranoid */
+ dev_err(dev, "firmware %s: bad size, got "
+ "%zu B vs %u expected\n",
+ i2400m->bus_fw_name, bcf_size, size);
+ goto error;
+ }
+
+ d_printf(2, dev, "type 0x%x id 0x%x vendor 0x%x; header v%u.%u (%zu B) "
+ "date %08x (%zu B)\n",
+ module_type, module_id, module_vendor,
+ major_version, minor_version, (size_t) header_len,
+ date, (size_t) size);
+
+ if (module_type != 6) { /* built for the right hardware? */
+ dev_err(dev, "bad fw %s: unexpected module type 0x%x; "
+ "aborting\n", i2400m->bus_fw_name, module_type);
+ goto error;
+ }
+
+ /* Check soft-er errors */
+ result = 0;
+ if (module_vendor != 0x8086)
+ dev_err(dev, "bad fw %s? unexpected vendor 0x%04x\n",
+ i2400m->bus_fw_name, module_vendor);
+ if (date < 0x20080300)
+ dev_err(dev, "bad fw %s? build date too old %08x\n",
+ i2400m->bus_fw_name, date);
+error:
+ return result;
+}
+
+
+/*
+ * Download the firmware to the device
+ *
+ * @i2400m: device descriptor
+ * @bcf: pointer to loaded (and minimally verified for consistency)
+ * firmware
+ * @bcf_size: size of the @bcf buffer (header plus payloads)
+ *
+ * The process for doing this is described in this file's header.
+ *
+ * Note we only reinitialize boot-mode if the flags say so. Some hw
+ * iterations need it, some don't. In any case, if we loop, we always
+ * need to reinitialize the bootrom, hence the flags modification.
+ */
+static
+int i2400m_fw_dnload(struct i2400m *i2400m, const struct i2400m_bcf_hdr *bcf,
+ size_t bcf_size, enum i2400m_bri flags)
+{
+ int ret = 0;
+ struct device *dev = i2400m_dev(i2400m);
+ int count = I2400M_BOOT_RETRIES;
+
+ d_fnstart(5, dev, "(i2400m %p bcf %p size %zu)\n",
+ i2400m, bcf, bcf_size);
+ i2400m->boot_mode = 1;
+hw_reboot:
+ if (count-- == 0) {
+ ret = -ERESTARTSYS;
+ dev_err(dev, "device rebooted too many times, aborting\n");
+ goto error_too_many_reboots;
+ }
+ if (flags & I2400M_BRI_MAC_REINIT) {
+ ret = i2400m_bootrom_init(i2400m, flags);
+ if (ret < 0) {
+ dev_err(dev, "bootrom init failed: %d\n", ret);
+ goto error_bootrom_init;
+ }
+ }
+ flags |= I2400M_BRI_MAC_REINIT;
+
+ /*
+ * Initialize the download, push the bytes to the device and
+ * then jump to the new firmware. Note @ret is passed with the
+ * offset of the jump instruction to _dnload_finalize()
+ */
+ ret = i2400m_dnload_init(i2400m, bcf); /* Init device's dnload */
+ if (ret == -ERESTARTSYS)
+ goto error_dev_rebooted;
+ if (ret < 0)
+ goto error_dnload_init;
+
+ ret = i2400m_dnload_bcf(i2400m, bcf, bcf_size);
+ if (ret == -ERESTARTSYS)
+ goto error_dev_rebooted;
+ if (ret < 0) {
+ dev_err(dev, "fw %s: download failed: %d\n",
+ i2400m->bus_fw_name, ret);
+ goto error_dnload_bcf;
+ }
+
+ ret = i2400m_dnload_finalize(i2400m, bcf, ret);
+ if (ret == -ERESTARTSYS)
+ goto error_dev_rebooted;
+ if (ret < 0) {
+ dev_err(dev, "fw %s: "
+ "download finalization failed: %d\n",
+ i2400m->bus_fw_name, ret);
+ goto error_dnload_finalize;
+ }
+
+ d_printf(2, dev, "fw %s successfully uploaded\n",
+ i2400m->bus_fw_name);
+ i2400m->boot_mode = 0;
+error_dnload_finalize:
+error_dnload_bcf:
+error_dnload_init:
+error_bootrom_init:
+error_too_many_reboots:
+ d_fnend(5, dev, "(i2400m %p bcf %p size %zu) = %d\n",
+ i2400m, bcf, bcf_size, ret);
+ return ret;
+
+error_dev_rebooted:
+ dev_err(dev, "device rebooted, %d tries left\n", count);
+ /* we got the notification already, no need to wait for it again */
+ flags |= I2400M_BRI_SOFT;
+ goto hw_reboot;
+}
+
+
+/**
+ * i2400m_dev_bootstrap - Bring the device to a known state and upload firmware
+ *
+ * @i2400m: device descriptor
+ *
+ * Returns: >= 0 if ok, < 0 errno code on error.
+ *
+ * This sets up the firmware upload environment, loads the firmware
+ * file from disk, verifies and then calls the firmware upload process
+ * per se.
+ *
+ * Can be called either from probe or after a warm reset. Cannot be
+ * called from within an interrupt. All the flow in this code is
+ * single-threaded; all I/Os are synchronous.
+ */
+int i2400m_dev_bootstrap(struct i2400m *i2400m, enum i2400m_bri flags)
+{
+ int ret = 0;
+ struct device *dev = i2400m_dev(i2400m);
+ const struct firmware *fw;
+ const struct i2400m_bcf_hdr *bcf; /* Firmware data */
+
+ d_fnstart(5, dev, "(i2400m %p)\n", i2400m);
+ /* Load firmware files to memory. */
+ ret = request_firmware(&fw, i2400m->bus_fw_name, dev);
+ if (ret) {
+ dev_err(dev, "fw %s: request failed: %d\n",
+ i2400m->bus_fw_name, ret);
+ goto error_fw_req;
+ }
+ bcf = (void *) fw->data;
+
+ ret = i2400m_fw_check(i2400m, bcf, fw->size);
+ if (ret < 0)
+ goto error_fw_bad;
+ ret = i2400m_fw_dnload(i2400m, bcf, fw->size, flags);
+error_fw_bad:
+ release_firmware(fw);
+error_fw_req:
+ d_fnend(5, dev, "(i2400m %p) = %d\n", i2400m, ret);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(i2400m_dev_bootstrap);
diff --git a/drivers/net/wimax/i2400m/i2400m-sdio.h b/drivers/net/wimax/i2400m/i2400m-sdio.h
new file mode 100644
index 00000000000..08c2fb73923
--- /dev/null
+++ b/drivers/net/wimax/i2400m/i2400m-sdio.h
@@ -0,0 +1,132 @@
+/*
+ * Intel Wireless WiMAX Connection 2400m
+ * SDIO-specific i2400m driver definitions
+ *
+ *
+ * Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Intel Corporation
+ * Brian Bian
+ * Dirk Brandewie
+ * Inaky Perez-Gonzalez
+ * Yanir Lubetkin
+ * - Initial implementation
+ *
+ *
+ * This driver implements the bus-specific part of the i2400m for
+ * SDIO. Check i2400m.h for a generic driver description.
+ *
+ * ARCHITECTURE
+ *
+ * This driver sits under the bus-generic i2400m driver, providing the
+ * connection to the device.
+ *
+ * When probed, all the function pointers are set up and then the
+ * bus-generic code is called. The generic driver will then use the
+ * provided pointers for uploading firmware (i2400ms_bus_bm*() in
+ * sdio-fw.c) and then setting up the device (i2400ms_dev_*() in
+ * sdio.c).
+ *
+ * Once firmware is uploaded, TX functions (sdio-tx.c) are called when
+ * data is ready for transmission in the TX FIFO; when the SDIO IRQ
+ * fires and data is available (sdio-rx.c), it is passed to the generic
+ * driver for processing with i2400m_rx().
+ */
+
+#ifndef __I2400M_SDIO_H__
+#define __I2400M_SDIO_H__
+
+#include "i2400m.h"
+
+/* Host-Device interface for SDIO */
+enum {
+ I2400MS_BLK_SIZE = 256,
+ I2400MS_PL_SIZE_MAX = 0x3E00,
+
+ I2400MS_DATA_ADDR = 0x0,
+ I2400MS_INTR_STATUS_ADDR = 0x13,
+ I2400MS_INTR_CLEAR_ADDR = 0x13,
+ I2400MS_INTR_ENABLE_ADDR = 0x14,
+ I2400MS_INTR_GET_SIZE_ADDR = 0x2C,
+ /* The number of ticks to wait for the device to signal that
+ * it is ready */
+ I2400MS_INIT_SLEEP_INTERVAL = 10,
+};
+
+
+/**
+ * struct i2400ms - descriptor for a SDIO connected i2400m
+ *
+ * @i2400m: bus-generic i2400m implementation; has to be first (see
+ * its documentation in i2400m.h).
+ *
+ * @func: pointer to our SDIO function
+ *
+ * @tx_worker: work struct used to TX data when the bus-generic
+ * code signals packets are pending for transmission to the device.
+ *
+ * @tx_workqueue: workqueue used for data TX; we don't use the
+ * system's workqueue as that might cause deadlocks with code in
+ * the bus-generic driver.
+ */
+struct i2400ms {
+ struct i2400m i2400m; /* FIRST! See doc */
+ struct sdio_func *func;
+
+ struct work_struct tx_worker;
+ struct workqueue_struct *tx_workqueue;
+ char tx_wq_name[32];
+
+ struct dentry *debugfs_dentry;
+};
+
+
+static inline
+void i2400ms_init(struct i2400ms *i2400ms)
+{
+ i2400m_init(&i2400ms->i2400m);
+}
+
+
+extern int i2400ms_rx_setup(struct i2400ms *);
+extern void i2400ms_rx_release(struct i2400ms *);
+extern ssize_t __i2400ms_rx_get_size(struct i2400ms *);
+
+extern int i2400ms_tx_setup(struct i2400ms *);
+extern void i2400ms_tx_release(struct i2400ms *);
+extern void i2400ms_bus_tx_kick(struct i2400m *);
+
+extern ssize_t i2400ms_bus_bm_cmd_send(struct i2400m *,
+ const struct i2400m_bootrom_header *,
+ size_t, int);
+extern ssize_t i2400ms_bus_bm_wait_for_ack(struct i2400m *,
+ struct i2400m_bootrom_header *,
+ size_t);
+#endif /* #ifndef __I2400M_SDIO_H__ */
diff --git a/drivers/net/wimax/i2400m/i2400m-usb.h b/drivers/net/wimax/i2400m/i2400m-usb.h
new file mode 100644
index 00000000000..6f76558b170
--- /dev/null
+++ b/drivers/net/wimax/i2400m/i2400m-usb.h
@@ -0,0 +1,264 @@
+/*
+ * Intel Wireless WiMAX Connection 2400m
+ * USB-specific i2400m driver definitions
+ *
+ *
+ * Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Intel Corporation
+ * Inaky Perez-Gonzalez
+ * Yanir Lubetkin
+ * - Initial implementation
+ *
+ *
+ * This driver implements the bus-specific part of the i2400m for
+ * USB. Check i2400m.h for a generic driver description.
+ *
+ * ARCHITECTURE
+ *
+ * This driver listens to notifications sent from the notification
+ * endpoint (in usb-notif.c); when data is ready to read, the code in
+ * there schedules a read from the device (usb-rx.c) and then passes
+ * the data to the generic RX code (rx.c).
+ *
+ * When the generic driver needs to send data (network or control), it
+ * queues up in the TX FIFO (tx.c) and that will notify the driver
+ * through the i2400m->bus_tx_kick() callback
+ * (usb-tx.c:i2400mu_bus_tx_kick) which will send the items in the
+ * FIFO queue.
+ *
+ * This driver, as well, implements the USB-specific ops for the generic
+ * driver to be able to setup/teardown communication with the device
+ * [i2400m_bus_dev_start() and i2400m_bus_dev_stop()], resetting the
+ * device [i2400m_bus_reset()] and performing firmware upload
+ * [i2400m_bus_bm_cmd() and i2400_bus_bm_wait_for_ack()].
+ */
+
+#ifndef __I2400M_USB_H__
+#define __I2400M_USB_H__
+
+#include "i2400m.h"
+#include
+
+
+/*
+ * Error Density Count: cheapo error density (over time) counter
+ *
+ * Originally by Reinette Chatre
+ *
+ * Embed a 'struct edc' somewhere. Each time there is a soft or
+ * retryable error, call edc_inc() and check if the error top
+ * watermark has been reached.
+ */
+enum {
+ EDC_MAX_ERRORS = 10,
+ EDC_ERROR_TIMEFRAME = HZ,
+};
+
+/* error density counter */
+struct edc {
+ unsigned long timestart;
+ u16 errorcount;
+};
+
+static inline void edc_init(struct edc *edc)
+{
+ edc->timestart = jiffies;
+}
+
+/**
+ * edc_inc - report a soft error and check if we are over the watermark
+ *
+ * @edc: pointer to error density counter.
+ * @max_err: maximum number of errors we can accept over the timeframe
+ * @timeframe: length of the timeframe (in jiffies).
+ *
+ * Returns: 1 if the maximum number of acceptable errors in the
+ * timeframe has been exceeded, 0 otherwise.
+ *
+ * This is a way to determine if the number of acceptable errors per time
+ * period has been exceeded. It is not accurate, as there are cases in which
+ * this scheme will not work, for example if there are periodic occurrences
+ * of errors that straddle updates to the start time. This scheme is
+ * sufficient for our usage.
+ *
+ * To use, embed a 'struct edc' somewhere, initialize it with
+ * edc_init() and when an error hits:
+ *
+ * if (do_something_fails_with_a_soft_error) {
+ * if (edc_inc(&my->edc, MAX_ERRORS, MAX_TIMEFRAME))
+ * Oops, hard error, do something about it
+ * else
+ * Retry or ignore, depending on whatever
+ * }
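+ *
+ * For instance, with the constants defined in this header and the
+ * @urb_edc member of struct i2400mu below, a caller might do
+ * (illustrative only):
+ *
+ *   if (edc_inc(&i2400mu->urb_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME))
+ *           give up and reset the device, the transport is failing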
+ */
+static inline int edc_inc(struct edc *edc, u16 max_err, u16 timeframe)
+{
+ unsigned long now;
+
+ now = jiffies;
+ if (now - edc->timestart > timeframe) {
+ edc->errorcount = 1;
+ edc->timestart = now;
+ } else if (++edc->errorcount > max_err) {
+ edc->errorcount = 0;
+ edc->timestart = now;
+ return 1;
+ }
+ return 0;
+}
+
+/* Host-Device interface for USB */
+enum {
+ I2400MU_MAX_NOTIFICATION_LEN = 256,
+ I2400MU_BLK_SIZE = 16,
+ I2400MU_PL_SIZE_MAX = 0x3EFF,
+
+ /* Endpoints */
+ I2400MU_EP_BULK_OUT = 0,
+ I2400MU_EP_NOTIFICATION,
+ I2400MU_EP_RESET_COLD,
+ I2400MU_EP_BULK_IN,
+};
+
+
+/**
+ * struct i2400mu - descriptor for a USB connected i2400m
+ *
+ * @i2400m: bus-generic i2400m implementation; has to be first (see
+ * its documentation in i2400m.h).
+ *
+ * @usb_dev: pointer to our USB device
+ *
+ * @usb_iface: pointer to our USB interface
+ *
+ * @urb_edc: error density counter; used to keep a density-on-time tab
+ * on how many soft (retryable or ignorable) errors we get. If we
+ * go over the threshold, we consider the bus transport is failing
+ * too much and reset.
+ *
+ * @notif_urb: URB for receiving notifications from the device.
+ *
+ * @tx_kthread: thread we use for data TX. We use a thread because in
+ * order to do deep power saving and put the device to sleep, we
+ * need to call usb_autopm_*() [blocking functions].
+ *
+ * @tx_wq: waitqueue for the TX kthread to sleep when there is no data
+ * to be sent; when more data is available, it is woken up by
+ * i2400mu_bus_tx_kick().
+ *
+ * @rx_kthread: thread we use for data RX. We use a thread because in
+ * order to do deep power saving and put the device to sleep, we
+ * need to call usb_autopm_*() [blocking functions].
+ *
+ * @rx_wq: waitqueue for the RX kthread to sleep when there is no data
+ * to receive. When data is available, it is woken up by
+ * usb-notif.c:i2400mu_notification_grok().
+ *
+ * @rx_pending_count: number of rx-data-ready notifications that were
+ * still not handled by the RX kthread.
+ *
+ * @rx_size: current RX buffer size that is being used.
+ *
+ * @rx_size_acc: accumulator of the sizes of the previous read
+ * transactions.
+ *
+ * @rx_size_cnt: number of read transactions accumulated in
+ * @rx_size_acc.
+ *
+ * @do_autopm: disable(0)/enable(>0) calling the
+ * usb_autopm_get/put_interface() barriers when executing
+ * commands. See doc in i2400mu_suspend() for more information.
+ *
+ * @rx_size_auto_shrink: if true, the rx_size is shrunk
+ * automatically based on the average size of the received
+ * transactions. This allows the receive code to allocate smaller
+ * chunks of memory and thus reduce pressure on the memory
+ * allocator by not wasting so much space. By default it is
+ * enabled.
+ *
+ * @debugfs_dentry: hookup for debugfs files.
+ * These have to be in a separate directory, a child of
+ * (wimax_dev->debugfs_dentry) so they can be removed when the
+ * module unloads, as we don't keep each dentry.
+ */
+struct i2400mu {
+ struct i2400m i2400m; /* FIRST! See doc */
+
+ struct usb_device *usb_dev;
+ struct usb_interface *usb_iface;
+ struct edc urb_edc; /* Error density counter */
+
+ struct urb *notif_urb;
+ struct task_struct *tx_kthread;
+ wait_queue_head_t tx_wq;
+
+ struct task_struct *rx_kthread;
+ wait_queue_head_t rx_wq;
+ atomic_t rx_pending_count;
+ size_t rx_size, rx_size_acc, rx_size_cnt;
+ atomic_t do_autopm;
+ u8 rx_size_auto_shrink;
+
+ struct dentry *debugfs_dentry;
+};
+
+
+static inline
+void i2400mu_init(struct i2400mu *i2400mu)
+{
+ i2400m_init(&i2400mu->i2400m);
+ edc_init(&i2400mu->urb_edc);
+ init_waitqueue_head(&i2400mu->tx_wq);
+ atomic_set(&i2400mu->rx_pending_count, 0);
+ init_waitqueue_head(&i2400mu->rx_wq);
+ i2400mu->rx_size = PAGE_SIZE - sizeof(struct skb_shared_info);
+ atomic_set(&i2400mu->do_autopm, 1);
+ i2400mu->rx_size_auto_shrink = 1;
+}
+
+extern int i2400mu_notification_setup(struct i2400mu *);
+extern void i2400mu_notification_release(struct i2400mu *);
+
+extern int i2400mu_rx_setup(struct i2400mu *);
+extern void i2400mu_rx_release(struct i2400mu *);
+extern void i2400mu_rx_kick(struct i2400mu *);
+
+extern int i2400mu_tx_setup(struct i2400mu *);
+extern void i2400mu_tx_release(struct i2400mu *);
+extern void i2400mu_bus_tx_kick(struct i2400m *);
+
+extern ssize_t i2400mu_bus_bm_cmd_send(struct i2400m *,
+ const struct i2400m_bootrom_header *,
+ size_t, int);
+extern ssize_t i2400mu_bus_bm_wait_for_ack(struct i2400m *,
+ struct i2400m_bootrom_header *,
+ size_t);
+#endif /* #ifndef __I2400M_USB_H__ */
diff --git a/drivers/net/wimax/i2400m/i2400m.h b/drivers/net/wimax/i2400m/i2400m.h
new file mode 100644
index 00000000000..067c871cc22
--- /dev/null
+++ b/drivers/net/wimax/i2400m/i2400m.h
@@ -0,0 +1,755 @@
+/*
+ * Intel Wireless WiMAX Connection 2400m
+ * Declarations for bus-generic internal APIs
+ *
+ *
+ * Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Intel Corporation
+ * Inaky Perez-Gonzalez
+ * Yanir Lubetkin
+ * - Initial implementation
+ *
+ *
+ * GENERAL DRIVER ARCHITECTURE
+ *
+ * The i2400m driver is split in the following two major parts:
+ *
+ * - bus specific driver
+ * - bus generic driver (this part)
+ *
+ * The bus specific driver sets up stuff specific to the bus the
+ * device is connected to (USB, SDIO, PCI, tam-tam...non-authoritative
+ * nor binding list) which is basically the device-model management
+ * (probe/disconnect, etc), moving data from device to kernel and
+ * back, doing the power saving details and resetting the device.
+ *
+ * For details on each bus-specific driver, see its include file,
+ * i2400m-BUSNAME.h
+ *
+ * The bus-generic functionality is broken up as:
+ *
+ * - Firmware upload: fw.c - takes care of uploading firmware to the
+ * device. bus-specific driver just needs to provide a way to
+ * execute boot-mode commands and to reset the device.
+ *
+ * - RX handling: rx.c - receives data from the bus-specific code and
+ * feeds it to the network or WiMAX stack or uses it to modify
+ * the driver state. bus-specific driver only has to receive
+ * frames and pass them to this module.
+ *
+ * - TX handling: tx.c - manages the TX FIFO queue and provides means
+ * for the bus-specific TX code to pull data from the FIFO
+ * queue. bus-specific code just pulls frames from this module
+ * to send them to the device.
+ *
+ * - netdev glue: netdev.c - interface with Linux networking
+ * stack. Pass around data frames, and configure when the
+ * device is up and running or shutdown (through ifconfig up /
+ * down). Bus-generic only.
+ *
+ * - control ops: control.c - implements various commands for
+ * controlling the device. bus-generic only.
+ *
+ * - device model glue: driver.c - implements helpers for the
+ * device-model glue done by the bus-specific layer
+ * (setup/release the driver resources), turning the device on
+ * and off, handling the device reboots/resets and a few simple
+ * WiMAX stack ops.
+ *
+ * Code is also broken up into linux-glue / device-glue.
+ *
+ * Linux glue contains functions that deal mostly with gluing with the
+ * rest of the Linux kernel.
+ *
+ * Device-glue contains functions that deal mostly with the way the device
+ * does things and talk the device's language.
+ *
+ * device-glue code is licensed BSD so other open source OSes can take
+ * it to implement their drivers.
+ *
+ *
+ * APIs AND HEADER FILES
+ *
+ * This bus generic code exports three APIs:
+ *
+ * - HDI (host-device interface) definitions common to all busses
+ * (include/linux/wimax/i2400m.h); these can also be used by user
+ * space code.
+ * - internal API for the bus-generic code
+ * - external API for the bus-specific drivers
+ *
+ *
+ * LIFE CYCLE:
+ *
+ * When the bus-specific driver probes, it allocates a network device
+ * with enough space for its data structure, which must contain a
+ * &struct i2400m at the top.
+ *
+ * On probe, it needs to fill the i2400m members marked as [fill], as
+ * well as i2400m->wimax_dev.net_dev and call i2400m_setup(). The
+ * i2400m driver will only register with the WiMAX and network stacks;
+ * the only access done to the device is to read the MAC address so we
+ * can register a network device. This calls i2400m_dev_start() to
+ * load firmware, set up communication with the device and configure it
+ * for operation.
+ *
+ * At this point, control and data communications are possible.
+ *
+ * On disconnect/driver unload, the bus-specific disconnect function
+ * calls i2400m_release() to undo i2400m_setup(). i2400m_dev_stop()
+ * shuts the firmware down and releases resources used to communicate
+ * with the device.
+ *
+ * While the device is up, it might reset. The bus-specific driver has
+ * to catch that situation and call i2400m_dev_reset_handle() to deal
+ * with it (reset the internal driver structures and go back to square
+ * one).
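+ *
+ * A rough probe-time sketch for a hypothetical bus-specific driver
+ * (names prefixed with my_ are illustrative; the rest are the [fill]
+ * members and calls described above):
+ *
+ *   net_dev = alloc_netdev(sizeof(struct my_bus_dev), ...);
+ *   my_bus_dev = netdev_priv(net_dev);
+ *   i2400m = &my_bus_dev->i2400m;
+ *   i2400m_init(i2400m);
+ *   i2400m->wimax_dev.net_dev = net_dev;
+ *   i2400m->bus_tx_block_size = ...;
+ *   i2400m->bus_dev_start = my_bus_dev_start;
+ *   i2400m->bus_reset = my_bus_reset;
+ *   (... fill in the remaining [fill] members ...)
+ *   result = i2400m_setup(i2400m, ...);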
+ */
+
+#ifndef __I2400M_H__
+#define __I2400M_H__
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+/* Misc constants */
+enum {
+ /* Firmware uploading */
+ I2400M_BOOT_RETRIES = 3,
+ /* Size of the Boot Mode Command buffer */
+ I2400M_BM_CMD_BUF_SIZE = 16 * 1024,
+ I2400M_BM_ACK_BUF_SIZE = 256,
+};
+
+
+/* Firmware version we request when pulling the fw image file */
+#define I2400M_FW_VERSION "1.3"
+
+
+/**
+ * i2400m_reset_type - methods to reset a device
+ *
+ * @I2400M_RT_WARM: Reset without device disconnection, device handles
+ * are kept valid but state is back to power on, with firmware
+ * re-uploaded.
+ * @I2400M_RT_COLD: Tell the device to disconnect itself from the bus
+ * and reconnect. Renders all device handles invalid.
+ * @I2400M_RT_BUS: Tells the bus to reset the device; last measure
+ * used when both types above don't work.
+ */
+enum i2400m_reset_type {
+ I2400M_RT_WARM, /* first measure */
+ I2400M_RT_COLD, /* second measure */
+ I2400M_RT_BUS, /* call in artillery */
+};
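+
+/*
+ * For example, the firmware loader kicks a warm reset through the
+ * bus-specific hook when it needs to take the device back to boot
+ * mode (see fw.c:i2400m_bootrom_init()):
+ *
+ *	i2400m->bus_reset(i2400m, I2400M_RT_WARM);
+ */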
+
+struct i2400m_reset_ctx;
+
+/**
+ * struct i2400m - descriptor for an Intel 2400m
+ *
+ * Members marked with [fill] must be filled out/initialized before
+ * calling i2400m_setup().
+ *
+ * @bus_tx_block_size: [fill] SDIO imposes a 256-byte block size, USB 16,
+ * so we have a tx_blk_size variable that the bus layer sets to
+ * tell the engine how much of that we need.
+ *
+ * @bus_pl_size_max: [fill] Maximum payload size.
+ *
+ * @bus_dev_start: [fill] Function called by the bus-generic code
+ * [i2400m_dev_start()] to set up the bus-specific communications
+ * to the device. See LIFE CYCLE above.
+ *
+ * NOTE: Doesn't need to upload the firmware, as that is taken
+ * care of by the bus-generic code.
+ *
+ * @bus_dev_stop: [fill] Function called by the bus-generic code
+ * [i2400m_dev_stop()] to shut down the bus-specific communications
+ * to the device. See LIFE CYCLE above.
+ *
+ * This function does not need to reset the device, just tear down
+ * all the host resources created to handle communication with
+ * the device.
+ *
+ * @bus_tx_kick: [fill] Function called by the bus-generic code to let
+ * the bus-specific code know that there is data available in the
+ * TX FIFO for transmission to the device.
+ *
+ * This function cannot sleep.
+ *
+ * @bus_reset: [fill] Function called by the bus-generic code to reset
+ * the device in various ways. Doesn't need to wait for the
+ * reset to finish.
+ *
+ * If warm or cold reset fail, this function is expected to do a
+ * bus-specific reset (eg: USB reset) to get the device to a
+ * working state (even if it implies device disconnection).
+ *
+ * Note the warm reset is used by the firmware uploader to
+ * reinitialize the device.
+ *
+ * IMPORTANT: this is called very early in the device setup
+ * process, so it cannot rely on common infrastructure being laid
+ * out.
+ *
+ * @bus_bm_cmd_send: [fill] Function called to send a boot-mode
+ * command. Flags are defined in 'enum i2400m_bm_cmd_flags'. This
+ * is synchronous and has to return 0 if ok or < 0 errno code in
+ * any error condition.
+ *
+ * @bus_bm_wait_for_ack: [fill] Function called to wait for a
+ * boot-mode notification (that can be a response to a previously
+ * issued command or an asynchronous one). Will read until all the
+ * indicated size is read or timeout. Reading more or less data
+ * than asked for is an error condition. Return 0 if ok, < 0 errno
+ * code on error.
+ *
+ * The caller to this function will check if the response is a
+ * barker that indicates the device going into reset mode.
+ *
+ * @bus_fw_name: [fill] name of the firmware image (in most cases,
+ * they are all the same for a single release, except that they
+ * have the type of the bus embedded in the name (eg:
+ * i2400m-fw-X-VERSION.sbcf, where X is the bus name)).
+ *
+ * @bus_bm_mac_addr_impaired: [fill] Set to true if the device's MAC
+ * address provided in boot mode is kind of broken and needs to
+ * be re-read later on.
+ *
+ *
+ * @wimax_dev: WiMAX generic device for linkage into the kernel WiMAX
+ * stack. Due to the way a net_device is allocated, we need to
+ * force this to be the first field so that we can get from
+ * netdev_priv() the right pointer.
+ *
+ * @state: device's state (as reported by it)
+ *
+ * @state_wq: waitqueue that is woken up whenever the state changes
+ *
+ * @tx_lock: spinlock to protect TX members
+ *
+ * @tx_buf: FIFO buffer for TX; we queue data here
+ *
+ * @tx_in: FIFO index for incoming data. Note this doesn't wrap around
+ * and it is always greater than @tx_out.
+ *
+ * @tx_out: FIFO index for outgoing data
+ *
+ * @tx_msg: current TX message that is active in the FIFO for
+ * appending payloads.
+ *
+ * @tx_sequence: current sequence number for TX messages from the
+ * device to the host.
+ *
+ * @tx_msg_size: size of the current message being transmitted by the
+ * bus-specific code.
+ *
+ * @tx_pl_num: total number of payloads sent
+ *
+ * @tx_pl_max: maximum number of payloads sent in a TX message
+ *
+ * @tx_pl_min: minimum number of payloads sent in a TX message
+ *
+ * @tx_num: number of TX messages sent
+ *
+ * @tx_size_acc: number of bytes in all TX messages sent
+ * (this is different to net_dev's statistics as it also counts
+ * control messages).
+ *
+ * @tx_size_min: smallest TX message sent.
+ *
+ * @tx_size_max: biggest TX message sent.
+ *
+ * @rx_lock: spinlock to protect RX members
+ *
+ * @rx_pl_num: total number of payloads received
+ *
+ * @rx_pl_max: maximum number of payloads received in a RX message
+ *
+ * @rx_pl_min: minimum number of payloads received in a RX message
+ *
+ * @rx_num: number of RX messages received
+ *
+ * @rx_size_acc: number of bytes in all RX messages received
+ * (this is different to net_dev's statistics as it also counts
+ * control messages).
+ *
+ * @rx_size_min: smallest RX message received.
+ *
+ * @rx_size_max: biggest RX message received.
+ *
+ * @init_mutex: Mutex used for serializing the device bringup
+ * sequence; this way if the device reboots in the middle, we
+ * don't try to do a bringup again while we are tearing down the
+ * one that failed.
+ *
+ * Can't reuse @msg_mutex because from within the bringup sequence
+ * we need to send messages to the device and thus use @msg_mutex.
+ *
+ * @msg_mutex: mutex used to send control commands to the device (we
+ * only allow one at a time, per host-device interface design).
+ *
+ * @msg_completion: used to wait for an ack to a control command sent
+ * to the device.
+ *
+ * @ack_skb: used to store the actual ack to a control command if the
+ * reception of the command was successful. Otherwise, an ERR_PTR()
+ * errno code that indicates what failed with the ack reception.
+ *
+ * Only valid after @msg_completion is woken up. Only updateable
+ * if @msg_completion is armed. Only touched by
+ * i2400m_msg_to_dev().
+ *
+ * Protected by @rx_lock. In theory the command execution flow is
+ * sequential, but in case the device sends an out-of-phase or
+ * very delayed response, we need to avoid it trampling current
+ * execution.
+ *
+ * @bm_cmd_buf: boot mode command buffer for composing firmware upload
+ * commands.
+ *
+ * USB can't r/w to the stack, vmalloc, etc. Also, we end up
+ * having to alloc/free a lot to compose commands, so we use these
+ * for staging instead of having to realloc all the time.
+ *
+ * This assumes the code always runs serialized. Only one thread
+ * can call i2400m_bm_cmd() at the same time.
+ *
+ * @bm_ack_buf: boot mode acknowledge buffer for staging reception of
+ * responses to commands.
+ *
+ * See @bm_cmd_buf.
+ *
+ * @work_queue: work queue for processing device reports. This
+ * workqueue cannot be used for processing TX or RX to the device,
+ * as from it we'll process device reports, which might require
+ * further communication with the device.
+ *
+ * @debugfs_dentry: hookup for debugfs files.
+ * These have to be in a separate directory, a child of
+ * (wimax_dev->debugfs_dentry) so they can be removed when the
+ * module unloads, as we don't keep each dentry.
+ */
+struct i2400m {
+ struct wimax_dev wimax_dev; /* FIRST! See doc */
+
+ unsigned updown:1; /* Network device is up or down */
+ unsigned boot_mode:1; /* is the device in boot mode? */
+ unsigned sboot:1; /* signed or unsigned fw boot */
+ unsigned ready:1; /* all probing steps done */
+ u8 trace_msg_from_user; /* echo rx msgs to 'trace' pipe */
+ /* typed u8 so debugfs/u8 can tweak */
+ enum i2400m_system_state state;
+ wait_queue_head_t state_wq; /* Woken up when on state updates */
+
+ size_t bus_tx_block_size;
+ size_t bus_pl_size_max;
+ int (*bus_dev_start)(struct i2400m *);
+ void (*bus_dev_stop)(struct i2400m *);
+ void (*bus_tx_kick)(struct i2400m *);
+ int (*bus_reset)(struct i2400m *, enum i2400m_reset_type);
+ ssize_t (*bus_bm_cmd_send)(struct i2400m *,
+ const struct i2400m_bootrom_header *,
+ size_t, int flags);
+ ssize_t (*bus_bm_wait_for_ack)(struct i2400m *,
+ struct i2400m_bootrom_header *, size_t);
+ const char *bus_fw_name;
+ unsigned bus_bm_mac_addr_impaired:1;
+
+ spinlock_t tx_lock; /* protect TX state */
+ void *tx_buf;
+ size_t tx_in, tx_out;
+ struct i2400m_msg_hdr *tx_msg;
+ size_t tx_sequence, tx_msg_size;
+ /* TX stats */
+ unsigned tx_pl_num, tx_pl_max, tx_pl_min,
+ tx_num, tx_size_acc, tx_size_min, tx_size_max;
+
+ /* RX stats */
+ spinlock_t rx_lock; /* protect RX state */
+ unsigned rx_pl_num, rx_pl_max, rx_pl_min,
+ rx_num, rx_size_acc, rx_size_min, rx_size_max;
+
+ struct mutex msg_mutex; /* serialize command execution */
+ struct completion msg_completion;
+ struct sk_buff *ack_skb; /* protected by rx_lock */
+
+ void *bm_ack_buf; /* for receiving acks over USB */
+ void *bm_cmd_buf; /* for issuing commands over USB */
+
+ struct workqueue_struct *work_queue;
+
+ struct mutex init_mutex; /* protect bringup seq */
+ struct i2400m_reset_ctx *reset_ctx; /* protected by init_mutex */
+
+ struct work_struct wake_tx_ws;
+ struct sk_buff *wake_tx_skb;
+
+ struct dentry *debugfs_dentry;
+};
+
+
+/*
+ * Initialize a 'struct i2400m' from all zeroes
+ *
+ * This is a bus-generic API call.
+ */
+static inline
+void i2400m_init(struct i2400m *i2400m)
+{
+ wimax_dev_init(&i2400m->wimax_dev);
+
+ i2400m->boot_mode = 1;
+ init_waitqueue_head(&i2400m->state_wq);
+
+ spin_lock_init(&i2400m->tx_lock);
+ i2400m->tx_pl_min = UINT_MAX;
+ i2400m->tx_size_min = UINT_MAX;
+
+ spin_lock_init(&i2400m->rx_lock);
+ i2400m->rx_pl_min = UINT_MAX;
+ i2400m->rx_size_min = UINT_MAX;
+
+ mutex_init(&i2400m->msg_mutex);
+ init_completion(&i2400m->msg_completion);
+
+ mutex_init(&i2400m->init_mutex);
+ /* wake_tx_ws is initialized in i2400m_tx_setup() */
+}
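+
+/*
+ * For illustration only: a bus-specific driver is expected to embed
+ * 'struct i2400m' (as its first member) in its own descriptor,
+ * allocate it as the netdev private area and then initialize it. The
+ * 'i2400mx' wrapper name and the "wmx%d" name format below are just
+ * examples, not part of this driver:
+ *
+ *	struct i2400mx {
+ *		struct i2400m i2400m;	(must be first)
+ *		(bus-specific members)
+ *	};
+ *
+ *	net_dev = alloc_netdev(sizeof(struct i2400mx), "wmx%d",
+ *			       i2400m_netdev_setup);
+ *	i2400mx = netdev_priv(net_dev);
+ *	i2400m_init(&i2400mx->i2400m);
+ */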
+
+
+/*
+ * Bus-generic internal APIs
+ * -------------------------
+ */
+
+static inline
+struct i2400m *wimax_dev_to_i2400m(struct wimax_dev *wimax_dev)
+{
+ return container_of(wimax_dev, struct i2400m, wimax_dev);
+}
+
+static inline
+struct i2400m *net_dev_to_i2400m(struct net_device *net_dev)
+{
+ return wimax_dev_to_i2400m(netdev_priv(net_dev));
+}
+
+/*
+ * Boot mode support
+ */
+
+/**
+ * i2400m_bm_cmd_flags - flags to i2400m_bm_cmd()
+ *
+ * @I2400M_BM_CMD_RAW: send the command block as-is, without doing any
+ * extra processing for adding CRC.
+ */
+enum i2400m_bm_cmd_flags {
+ I2400M_BM_CMD_RAW = 1 << 2,
+};
+
+/**
+ * i2400m_bri - Boot-ROM indicators
+ *
+ * Flags for i2400m_bootrom_init() and i2400m_dev_bootstrap() [which
+ * are passed from things like i2400m_setup()]. Can be combined with
+ * |.
+ *
+ * @I2400M_BRI_SOFT: The device rebooted already and a reboot barker
+ *     was received; proceed directly to ack the boot sequence.
+ * @I2400M_BRI_NO_REBOOT: Do not reboot the device and proceed
+ *     directly to wait for a reboot barker from the device.
+ * @I2400M_BRI_MAC_REINIT: We need to reinitialize the boot
+ *     rom after reading the MAC address. This is quite a dirty hack,
+ *     if you ask me -- the device requires the bootrom to be
+ *     initialized after reading the MAC address.
+ */
+enum i2400m_bri {
+ I2400M_BRI_SOFT = 1 << 1,
+ I2400M_BRI_NO_REBOOT = 1 << 2,
+ I2400M_BRI_MAC_REINIT = 1 << 3,
+};
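+
+/*
+ * The indicators are a bitmask; for illustration (the flag choice is
+ * an arbitrary example, not a recommendation), a caller that knows
+ * the device already sent a reboot barker and that its boot ROM needs
+ * reinitializing after the MAC address read could do:
+ *
+ *	i2400m_dev_bootstrap(i2400m,
+ *			     I2400M_BRI_SOFT | I2400M_BRI_MAC_REINIT);
+ */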
+
+extern void i2400m_bm_cmd_prepare(struct i2400m_bootrom_header *);
+extern int i2400m_dev_bootstrap(struct i2400m *, enum i2400m_bri);
+extern int i2400m_read_mac_addr(struct i2400m *);
+extern int i2400m_bootrom_init(struct i2400m *, enum i2400m_bri);
+
+/* Make/grok boot-rom header commands */
+
+static inline
+__le32 i2400m_brh_command(enum i2400m_brh_opcode opcode, unsigned use_checksum,
+ unsigned direct_access)
+{
+ return cpu_to_le32(
+ I2400M_BRH_SIGNATURE
+ | (direct_access ? I2400M_BRH_DIRECT_ACCESS : 0)
+ | I2400M_BRH_RESPONSE_REQUIRED /* response always required */
+ | (use_checksum ? I2400M_BRH_USE_CHECKSUM : 0)
+ | (opcode & I2400M_BRH_OPCODE_MASK));
+}
+
+static inline
+void i2400m_brh_set_opcode(struct i2400m_bootrom_header *hdr,
+ enum i2400m_brh_opcode opcode)
+{
+ hdr->command = cpu_to_le32(
+ (le32_to_cpu(hdr->command) & ~I2400M_BRH_OPCODE_MASK)
+ | (opcode & I2400M_BRH_OPCODE_MASK));
+}
+
+static inline
+unsigned i2400m_brh_get_opcode(const struct i2400m_bootrom_header *hdr)
+{
+ return le32_to_cpu(hdr->command) & I2400M_BRH_OPCODE_MASK;
+}
+
+static inline
+unsigned i2400m_brh_get_response(const struct i2400m_bootrom_header *hdr)
+{
+ return (le32_to_cpu(hdr->command) & I2400M_BRH_RESPONSE_MASK)
+ >> I2400M_BRH_RESPONSE_SHIFT;
+}
+
+static inline
+unsigned i2400m_brh_get_use_checksum(const struct i2400m_bootrom_header *hdr)
+{
+ return le32_to_cpu(hdr->command) & I2400M_BRH_USE_CHECKSUM;
+}
+
+static inline
+unsigned i2400m_brh_get_response_required(
+ const struct i2400m_bootrom_header *hdr)
+{
+ return le32_to_cpu(hdr->command) & I2400M_BRH_RESPONSE_REQUIRED;
+}
+
+static inline
+unsigned i2400m_brh_get_direct_access(const struct i2400m_bootrom_header *hdr)
+{
+ return le32_to_cpu(hdr->command) & I2400M_BRH_DIRECT_ACCESS;
+}
+
+static inline
+unsigned i2400m_brh_get_signature(const struct i2400m_bootrom_header *hdr)
+{
+ return (le32_to_cpu(hdr->command) & I2400M_BRH_SIGNATURE_MASK)
+ >> I2400M_BRH_SIGNATURE_SHIFT;
+}
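+
+/*
+ * Putting the helpers above together -- an illustrative sketch only
+ * (the opcode is whatever enum i2400m_brh_opcode value the caller
+ * needs; I2400M_BRH_READ is used here just as an example name):
+ *
+ *	struct i2400m_bootrom_header hdr;
+ *
+ *	hdr.command = i2400m_brh_command(I2400M_BRH_READ, 1, 0);
+ *	... fill in the rest of the header fields ...
+ *	if (i2400m_brh_get_use_checksum(&hdr))
+ *		... the device will expect a checksum ...
+ */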
+
+
+/*
+ * Driver / device setup and internal functions
+ */
+extern void i2400m_netdev_setup(struct net_device *net_dev);
+extern int i2400m_tx_setup(struct i2400m *);
+extern void i2400m_wake_tx_work(struct work_struct *);
+extern void i2400m_tx_release(struct i2400m *);
+
+extern void i2400m_net_rx(struct i2400m *, struct sk_buff *, unsigned,
+ const void *, int);
+enum i2400m_pt;
+extern int i2400m_tx(struct i2400m *, const void *, size_t, enum i2400m_pt);
+
+#ifdef CONFIG_DEBUG_FS
+extern int i2400m_debugfs_add(struct i2400m *);
+extern void i2400m_debugfs_rm(struct i2400m *);
+#else
+static inline int i2400m_debugfs_add(struct i2400m *i2400m)
+{
+ return 0;
+}
+static inline void i2400m_debugfs_rm(struct i2400m *i2400m) {}
+#endif
+
+/* Called by _dev_start()/_dev_stop() to initialize the device itself */
+extern int i2400m_dev_initialize(struct i2400m *);
+extern void i2400m_dev_shutdown(struct i2400m *);
+
+extern struct attribute_group i2400m_dev_attr_group;
+
+extern int i2400m_schedule_work(struct i2400m *,
+ void (*)(struct work_struct *), gfp_t);
+
+/* HDI message's payload description handling */
+
+static inline
+size_t i2400m_pld_size(const struct i2400m_pld *pld)
+{
+ return I2400M_PLD_SIZE_MASK & le32_to_cpu(pld->val);
+}
+
+static inline
+enum i2400m_pt i2400m_pld_type(const struct i2400m_pld *pld)
+{
+ return (I2400M_PLD_TYPE_MASK & le32_to_cpu(pld->val))
+ >> I2400M_PLD_TYPE_SHIFT;
+}
+
+static inline
+void i2400m_pld_set(struct i2400m_pld *pld, size_t size,
+ enum i2400m_pt type)
+{
+ pld->val = cpu_to_le32(
+ ((type << I2400M_PLD_TYPE_SHIFT) & I2400M_PLD_TYPE_MASK)
+ | (size & I2400M_PLD_SIZE_MASK));
+}
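+
+/*
+ * Example usage (illustrative values): pack a descriptor for a 1400
+ * byte data payload and read it back:
+ *
+ *	struct i2400m_pld pld;
+ *
+ *	i2400m_pld_set(&pld, 1400, I2400M_PT_DATA);
+ *	BUG_ON(i2400m_pld_size(&pld) != 1400
+ *	       || i2400m_pld_type(&pld) != I2400M_PT_DATA);
+ */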
+
+
+/*
+ * API for the bus-specific drivers
+ * --------------------------------
+ */
+
+static inline
+struct i2400m *i2400m_get(struct i2400m *i2400m)
+{
+ dev_hold(i2400m->wimax_dev.net_dev);
+ return i2400m;
+}
+
+static inline
+void i2400m_put(struct i2400m *i2400m)
+{
+ dev_put(i2400m->wimax_dev.net_dev);
+}
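+
+/*
+ * Typical (illustrative) usage of the get/put pair: take a reference
+ * before deferring work that touches the device and drop it once the
+ * deferred function has run, e.g.
+ *
+ *	i2400m_get(i2400m);
+ *	schedule_work(&i2400m->wake_tx_ws);
+ *	...
+ *	(and in the work function, when done)
+ *	i2400m_put(i2400m);
+ */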
+
+extern int i2400m_dev_reset_handle(struct i2400m *);
+
+/*
+ * _setup()/_release() are called by the probe/disconnect functions of
+ * the bus-specific drivers.
+ */
+extern int i2400m_setup(struct i2400m *, enum i2400m_bri bm_flags);
+extern void i2400m_release(struct i2400m *);
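+
+/*
+ * Rough (hypothetical) shape of a bus-specific probe/disconnect pair,
+ * error handling omitted; bm_flags is whatever boot mode indicators
+ * the bus driver needs to pass:
+ *
+ *	probe():
+ *		... allocate net_dev/i2400m, fill in i2400m->bus_* ...
+ *		i2400m_setup(i2400m, bm_flags);
+ *
+ *	disconnect():
+ *		i2400m_release(i2400m);
+ *		free_netdev(net_dev);
+ */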
+
+extern int i2400m_rx(struct i2400m *, struct sk_buff *);
+extern struct i2400m_msg_hdr *i2400m_tx_msg_get(struct i2400m *, size_t *);
+extern void i2400m_tx_msg_sent(struct i2400m *);
+
+static const __le32 i2400m_NBOOT_BARKER[4] = {
+ __constant_cpu_to_le32(I2400M_NBOOT_BARKER),
+ __constant_cpu_to_le32(I2400M_NBOOT_BARKER),
+ __constant_cpu_to_le32(I2400M_NBOOT_BARKER),
+ __constant_cpu_to_le32(I2400M_NBOOT_BARKER)
+};
+
+static const __le32 i2400m_SBOOT_BARKER[4] = {
+ __constant_cpu_to_le32(I2400M_SBOOT_BARKER),
+ __constant_cpu_to_le32(I2400M_SBOOT_BARKER),
+ __constant_cpu_to_le32(I2400M_SBOOT_BARKER),
+ __constant_cpu_to_le32(I2400M_SBOOT_BARKER)
+};
+
+
+/*
+ * Utility functions
+ */
+
+static inline
+struct device *i2400m_dev(struct i2400m *i2400m)
+{
+ return i2400m->wimax_dev.net_dev->dev.parent;
+}
+
+/*
+ * Helper for scheduling simple work functions
+ *
+ * This struct can get any kind of payload attached (normally in the
+ * form of a struct where you pack the stuff you want to pass to the
+ * _work function).
+ */
+struct i2400m_work {
+ struct work_struct ws;
+ struct i2400m *i2400m;
+ u8 pl[0];
+};
+extern int i2400m_queue_work(struct i2400m *,
+ void (*)(struct work_struct *), gfp_t,
+ const void *, size_t);
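+
+/*
+ * For illustration, arguments for the deferred function can be packed
+ * after the 'pl' member (the 'my_args'/'my_work_fn' names here are
+ * hypothetical):
+ *
+ *	struct my_args args = { ... };
+ *
+ *	i2400m_queue_work(i2400m, my_work_fn, GFP_KERNEL,
+ *			  &args, sizeof(args));
+ *
+ * and, inside my_work_fn(struct work_struct *ws):
+ *
+ *	struct i2400m_work *iw =
+ *		container_of(ws, struct i2400m_work, ws);
+ *	struct my_args *args = (void *) iw->pl;
+ */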
+
+extern int i2400m_msg_check_status(const struct i2400m_l3l4_hdr *,
+ char *, size_t);
+extern int i2400m_msg_size_check(struct i2400m *,
+ const struct i2400m_l3l4_hdr *, size_t);
+extern struct sk_buff *i2400m_msg_to_dev(struct i2400m *, const void *, size_t);
+extern void i2400m_msg_to_dev_cancel_wait(struct i2400m *, int);
+extern void i2400m_msg_ack_hook(struct i2400m *,
+ const struct i2400m_l3l4_hdr *, size_t);
+extern void i2400m_report_hook(struct i2400m *,
+ const struct i2400m_l3l4_hdr *, size_t);
+extern int i2400m_cmd_enter_powersave(struct i2400m *);
+extern int i2400m_cmd_get_state(struct i2400m *);
+extern int i2400m_cmd_exit_idle(struct i2400m *);
+extern struct sk_buff *i2400m_get_device_info(struct i2400m *);
+extern int i2400m_firmware_check(struct i2400m *);
+extern int i2400m_set_init_config(struct i2400m *,
+ const struct i2400m_tlv_hdr **, size_t);
+
+static inline
+struct usb_endpoint_descriptor *usb_get_epd(struct usb_interface *iface, int ep)
+{
+ return &iface->cur_altsetting->endpoint[ep].desc;
+}
+
+extern int i2400m_op_rfkill_sw_toggle(struct wimax_dev *,
+ enum wimax_rf_state);
+extern void i2400m_report_tlv_rf_switches_status(
+ struct i2400m *, const struct i2400m_tlv_rf_switches_status *);
+
+
+/*
+ * Do a millisecond-sleep for allowing wireshark to dump all the data
+ * packets. Used only for debugging.
+ */
+static inline
+void __i2400m_msleep(unsigned ms)
+{
+#if 1
+#else
+ msleep(ms);
+#endif
+}
+
+/* Module parameters */
+
+extern int i2400m_idle_mode_disabled;
+
+
+#endif /* #ifndef __I2400M_H__ */
diff --git a/drivers/net/wimax/i2400m/netdev.c b/drivers/net/wimax/i2400m/netdev.c
new file mode 100644
index 00000000000..63fe708e8a3
--- /dev/null
+++ b/drivers/net/wimax/i2400m/netdev.c
@@ -0,0 +1,524 @@
+/*
+ * Intel Wireless WiMAX Connection 2400m
+ * Glue with the networking stack
+ *
+ *
+ * Copyright (C) 2007 Intel Corporation
+ * Yanir Lubetkin
+ * Inaky Perez-Gonzalez
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * This implements an ethernet device for the i2400m.
+ *
+ * We fake being an ethernet device to simplify the support from user
+ * space and from the other side. The world is (sadly) configured to
+ * take in only Ethernet devices...
+ *
+ * Because of this, currently there is a copy-each-rxed-packet
+ * overhead on the RX path. Each IP packet has to be reallocated to
+ * add an ethernet header (as there is no space in what we get from
+ * the device). This is a known drawback and coming versions of the
+ * device's firmware are being changed to add header space that can be
+ * used to insert the ethernet header without having to reallocate and
+ * copy.
+ *
+ * TX error handling is tricky; because we have to FIFO/queue the
+ * buffers for transmission (as the hardware likes it aggregated), we
+ * just give the skb to the TX subsystem and by the time it is
+ * transmitted, we have long forgotten about it. So we just don't care
+ * too much about it.
+ *
+ * Note that when the device is in idle mode with the basestation, we
+ * need to negotiate coming back up online. That involves negotiation
+ * and possible user space interaction. Thus, we defer to a workqueue
+ * to do all that. By default, we only queue a single packet and drop
+ * the rest, as potentially the time to go back from idle to normal is
+ * long.
+ *
+ * ROADMAP
+ *
+ * i2400m_open Called on ifconfig up
+ * i2400m_stop Called on ifconfig down
+ *
+ * i2400m_hard_start_xmit Called by the network stack to send a packet
+ * i2400m_net_wake_tx Wake up device from basestation-IDLE & TX
+ * i2400m_wake_tx_work
+ * i2400m_cmd_exit_idle
+ * i2400m_tx
+ * i2400m_net_tx TX a data frame
+ * i2400m_tx
+ *
+ * i2400m_change_mtu Called on ifconfig mtu XXX
+ *
+ * i2400m_tx_timeout Called when the device times out
+ *
+ * i2400m_net_rx Called by the RX code when a data frame is
+ * available.
+ * i2400m_netdev_setup Called to setup all the netdev stuff from
+ * alloc_netdev.
+ */
+#include <linux/if_arp.h>
+#include <linux/netdevice.h>
+#include "i2400m.h"
+
+
+#define D_SUBMODULE netdev
+#include "debug-levels.h"
+
+enum {
+/* netdev interface */
+ /*
+ * Out of NWG spec (R1_v1.2.2), 3.3.3 ASN Bearer Plane MTU Size
+ *
+ * The MTU is 1400 or less
+ */
+ I2400M_MAX_MTU = 1400,
+ I2400M_TX_TIMEOUT = HZ,
+ I2400M_TX_QLEN = 5,
+};
+
+
+static
+int i2400m_open(struct net_device *net_dev)
+{
+ int result;
+ struct i2400m *i2400m = net_dev_to_i2400m(net_dev);
+ struct device *dev = i2400m_dev(i2400m);
+
+ d_fnstart(3, dev, "(net_dev %p [i2400m %p])\n", net_dev, i2400m);
+ if (i2400m->ready == 0) {
+ dev_err(dev, "Device is still initializing\n");
+ result = -EBUSY;
+ } else
+ result = 0;
+ d_fnend(3, dev, "(net_dev %p [i2400m %p]) = %d\n",
+ net_dev, i2400m, result);
+ return result;
+}
+
+
+/*
+ * On kernel versions where cancel_work_sync() didn't return anything,
+ * we rely on wake_tx_skb being non-NULL.
+ */
+static
+int i2400m_stop(struct net_device *net_dev)
+{
+ struct i2400m *i2400m = net_dev_to_i2400m(net_dev);
+ struct device *dev = i2400m_dev(i2400m);
+
+ d_fnstart(3, dev, "(net_dev %p [i2400m %p])\n", net_dev, i2400m);
+ /* See i2400m_hard_start_xmit(), references are taken there
+ * and here we release them if the work was still
+ * pending. Note we can't differentiate work not pending vs
+ * never scheduled, so the NULL check does that. */
+ if (cancel_work_sync(&i2400m->wake_tx_ws) == 0
+ && i2400m->wake_tx_skb != NULL) {
+ unsigned long flags;
+ struct sk_buff *wake_tx_skb;
+ spin_lock_irqsave(&i2400m->tx_lock, flags);
+ wake_tx_skb = i2400m->wake_tx_skb; /* compat help */
+ i2400m->wake_tx_skb = NULL; /* compat help */
+ spin_unlock_irqrestore(&i2400m->tx_lock, flags);
+ i2400m_put(i2400m);
+ kfree_skb(wake_tx_skb);
+ }
+ d_fnend(3, dev, "(net_dev %p [i2400m %p]) = 0\n", net_dev, i2400m);
+ return 0;
+}
+
+
+/*
+ * Wake up the device and transmit a held SKB, then restart the net queue
+ *
+ * When the device goes into basestation-idle mode, we need to tell it
+ * to exit that mode; it will negotiate with the base station, user
+ * space may have to intervene to rehandshake crypto and then tell us
+ * when it is ready to transmit the packet we have "queued". Still we
+ * need to give it some time after it reports being ok.
+ *
+ * On error, there is not much we can do. If the error was on TX, we
+ * still wake the queue up to see if the next packet will be luckier.
+ *
+ * If _cmd_exit_idle() fails...well, it could be many things; most
+ * commonly it is that something else took the device out of IDLE mode
+ * (for example, the base station). In that case we get an -EILSEQ and
+ * we are just going to ignore that one. If the device is back to
+ * connected, then fine -- if it is in some other state, the packet will
+ * be dropped anyway.
+ */
+void i2400m_wake_tx_work(struct work_struct *ws)
+{
+ int result;
+ struct i2400m *i2400m = container_of(ws, struct i2400m, wake_tx_ws);
+ struct device *dev = i2400m_dev(i2400m);
+	struct sk_buff *skb;
+ unsigned long flags;
+
+ spin_lock_irqsave(&i2400m->tx_lock, flags);
+ skb = i2400m->wake_tx_skb;
+ i2400m->wake_tx_skb = NULL;
+ spin_unlock_irqrestore(&i2400m->tx_lock, flags);
+
+ d_fnstart(3, dev, "(ws %p i2400m %p skb %p)\n", ws, i2400m, skb);
+ result = -EINVAL;
+ if (skb == NULL) {
+		dev_err(dev, "WAKE&TX: skb disappeared!\n");
+ goto out_put;
+ }
+ result = i2400m_cmd_exit_idle(i2400m);
+ if (result == -EILSEQ)
+ result = 0;
+ if (result < 0) {
+ dev_err(dev, "WAKE&TX: device didn't get out of idle: "
+ "%d\n", result);
+ goto error;
+ }
+ result = wait_event_timeout(i2400m->state_wq,
+ i2400m->state != I2400M_SS_IDLE, 5 * HZ);
+ if (result == 0)
+ result = -ETIMEDOUT;
+ if (result < 0) {
+ dev_err(dev, "WAKE&TX: error waiting for device to exit IDLE: "
+ "%d\n", result);
+ goto error;
+ }
+ msleep(20); /* device still needs some time or it drops it */
+ result = i2400m_tx(i2400m, skb->data, skb->len, I2400M_PT_DATA);
+ netif_wake_queue(i2400m->wimax_dev.net_dev);
+error:
+ kfree_skb(skb); /* refcount transferred by _hard_start_xmit() */
+out_put:
+ i2400m_put(i2400m);
+ d_fnend(3, dev, "(ws %p i2400m %p skb %p) = void [%d]\n",
+ ws, i2400m, skb, result);
+}
+
+
+/*
+ * Prepare the data payload TX header
+ *
+ * The i2400m expects a 4 byte header in front of a data packet.
+ *
+ * Because we pretend to be an ethernet device, this packet comes with
+ * an ethernet header. Pull it and push our header.
+ */
+static
+void i2400m_tx_prep_header(struct sk_buff *skb)
+{
+ struct i2400m_pl_data_hdr *pl_hdr;
+ skb_pull(skb, ETH_HLEN);
+ pl_hdr = (struct i2400m_pl_data_hdr *) skb_push(skb, sizeof(*pl_hdr));
+ pl_hdr->reserved = 0;
+}
+
+
+/*
+ * TX an skb to an idle device
+ *
+ * When the device is in basestation-idle mode, we need to wake it up
+ * and then TX. So we queue a work_struct for doing so.
+ *
+ * We need to get an extra ref for the skb (so it is not dropped), as
+ * well as be careful not to queue more than one request (won't help
+ * at all). If more than one request comes or there are errors, we
+ * just drop the packets (see i2400m_hard_start_xmit()).
+ */
+static
+int i2400m_net_wake_tx(struct i2400m *i2400m, struct net_device *net_dev,
+ struct sk_buff *skb)
+{
+ int result;
+ struct device *dev = i2400m_dev(i2400m);
+ unsigned long flags;
+
+ d_fnstart(3, dev, "(skb %p net_dev %p)\n", skb, net_dev);
+ if (net_ratelimit()) {
+ d_printf(3, dev, "WAKE&NETTX: "
+ "skb %p sending %d bytes to radio\n",
+ skb, skb->len);
+ d_dump(4, dev, skb->data, skb->len);
+ }
+ /* We hold a ref count for i2400m and skb, so when
+ * stopping() the device, we need to cancel that work
+ * and if pending, release those resources. */
+ result = 0;
+ spin_lock_irqsave(&i2400m->tx_lock, flags);
+ if (!work_pending(&i2400m->wake_tx_ws)) {
+ netif_stop_queue(net_dev);
+ i2400m_get(i2400m);
+ i2400m->wake_tx_skb = skb_get(skb); /* transfer ref count */
+ i2400m_tx_prep_header(skb);
+ result = schedule_work(&i2400m->wake_tx_ws);
+ WARN_ON(result == 0);
+ }
+ spin_unlock_irqrestore(&i2400m->tx_lock, flags);
+ if (result == 0) {
+ /* Yes, this happens even if we stopped the
+ * queue -- blame the queue disciplines that
+ * queue without looking -- I guess there is a reason
+ * for that. */
+ if (net_ratelimit())
+ d_printf(1, dev, "NETTX: device exiting idle, "
+ "dropping skb %p, queue running %d\n",
+ skb, netif_queue_stopped(net_dev));
+ result = -EBUSY;
+ }
+ d_fnend(3, dev, "(skb %p net_dev %p) = %d\n", skb, net_dev, result);
+ return result;
+}
+
+
+/*
+ * Transmit a packet to the base station on behalf of the network stack.
+ *
+ * Returns: 0 if ok, < 0 errno code on error.
+ *
+ * We need to pull the ethernet header and add the hardware header,
+ * which is currently set to all zeroes and reserved.
+ */
+static
+int i2400m_net_tx(struct i2400m *i2400m, struct net_device *net_dev,
+ struct sk_buff *skb)
+{
+ int result;
+ struct device *dev = i2400m_dev(i2400m);
+
+ d_fnstart(3, dev, "(i2400m %p net_dev %p skb %p)\n",
+ i2400m, net_dev, skb);
+ /* FIXME: check eth hdr, only IPv4 is routed by the device as of now */
+ net_dev->trans_start = jiffies;
+ i2400m_tx_prep_header(skb);
+ d_printf(3, dev, "NETTX: skb %p sending %d bytes to radio\n",
+ skb, skb->len);
+ d_dump(4, dev, skb->data, skb->len);
+ result = i2400m_tx(i2400m, skb->data, skb->len, I2400M_PT_DATA);
+ d_fnend(3, dev, "(i2400m %p net_dev %p skb %p) = %d\n",
+ i2400m, net_dev, skb, result);
+ return result;
+}
+
+
+/*
+ * Transmit a packet to the base station on behalf of the network stack
+ *
+ *
+ * Returns: NETDEV_TX_OK (always, even in case of error)
+ *
+ * In case of error, we just drop it. Reasons:
+ *
+ * - we add a hw header to each skb, and if the network stack
+ * retries, we have no way to know if that skb has it or not.
+ *
+ * - network protocols have their own drop-recovery mechanisms
+ *
+ * - there is not much else we can do
+ *
+ * If the device is idle, we need to wake it up; that is an operation
+ * that will sleep. See i2400m_net_wake_tx() for details.
+ */
+static
+int i2400m_hard_start_xmit(struct sk_buff *skb,
+ struct net_device *net_dev)
+{
+ int result;
+ struct i2400m *i2400m = net_dev_to_i2400m(net_dev);
+ struct device *dev = i2400m_dev(i2400m);
+
+ d_fnstart(3, dev, "(skb %p net_dev %p)\n", skb, net_dev);
+ if (i2400m->state == I2400M_SS_IDLE)
+ result = i2400m_net_wake_tx(i2400m, net_dev, skb);
+ else
+ result = i2400m_net_tx(i2400m, net_dev, skb);
+ if (result < 0)
+ net_dev->stats.tx_dropped++;
+ else {
+ net_dev->stats.tx_packets++;
+ net_dev->stats.tx_bytes += skb->len;
+ }
+ kfree_skb(skb);
+ result = NETDEV_TX_OK;
+ d_fnend(3, dev, "(skb %p net_dev %p) = %d\n", skb, net_dev, result);
+ return result;
+}
+
+
+static
+int i2400m_change_mtu(struct net_device *net_dev, int new_mtu)
+{
+ int result;
+ struct i2400m *i2400m = net_dev_to_i2400m(net_dev);
+ struct device *dev = i2400m_dev(i2400m);
+
+ if (new_mtu >= I2400M_MAX_MTU) {
+ dev_err(dev, "Cannot change MTU to %d (max is %d)\n",
+ new_mtu, I2400M_MAX_MTU);
+ result = -EINVAL;
+ } else {
+ net_dev->mtu = new_mtu;
+ result = 0;
+ }
+ return result;
+}
+
+
+static
+void i2400m_tx_timeout(struct net_device *net_dev)
+{
+ /*
+ * We might want to kick the device
+ *
+ * There is not much we can do though, as the device requires
+ * that we send the data aggregated. By the time we receive
+ * this, there might be data pending to be sent or not...
+ */
+ net_dev->stats.tx_errors++;
+ return;
+}
+
+
+/*
+ * Create a fake ethernet header
+ *
+ * For emulating an ethernet device, every received IP header has to
+ * be prefixed with an ethernet header.
+ *
+ * What we receive has (potentially) many IP packets concatenated with
+ * no ETH_HLEN bytes prefixed. Thus there is no space for an eth
+ * header.
+ *
+ * We would have to reallocate or do ugly fragment tricks in order to
+ * add it.
+ *
+ * But what we do is use the header space of the RX transaction
+ * (*msg_hdr) as we don't need it anymore; then we'll point all the
+ * data skbs there, as they share the same backing store.
+ *
+ * We only support IPv4 for v3 firmware.
+ */
+static
+void i2400m_rx_fake_eth_header(struct net_device *net_dev,
+ void *_eth_hdr)
+{
+ struct ethhdr *eth_hdr = _eth_hdr;
+
+ memcpy(eth_hdr->h_dest, net_dev->dev_addr, sizeof(eth_hdr->h_dest));
+	memset(eth_hdr->h_source, 0, sizeof(eth_hdr->h_source));
+ eth_hdr->h_proto = __constant_cpu_to_be16(ETH_P_IP);
+}
+
+
+/*
+ * i2400m_net_rx - pass a network packet to the stack
+ *
+ * @i2400m: device instance
+ * @skb_rx: the skb where the buffer pointed to by @buf is
+ * @i: 1 if payload is the only one
+ * @buf: pointer to the buffer containing the data
+ * @len: buffer's length
+ *
+ * We just clone the skb and set it up so that its skb->data pointer
+ * points to @buf and its length to @buf_len.
+ *
+ * Note that if the payload is the last (or the only one) in a
+ * multi-payload message, we don't clone the SKB but just reuse it.
+ *
+ * This function is normally run from a thread context. However, we
+ * still use netif_rx() instead of netif_receive_skb() as was
+ * recommended on the mailing list. The reason is that in some stress
+ * tests, when sending/receiving a lot of data, we seem to hit a
+ * softlock in the kernel's TCP implementation [around
+ * tcp_delay_timer()]. Using netif_rx() took care of the issue.
+ *
+ * This is, of course, still open to more research on why running
+ * with netif_receive_skb() hits this softlock. FIXME.
+ *
+ * FIXME: currently we make no effort to distinguish whether what we
+ * got was an IPv4 or IPv6 header, so as to set up the protocol field
+ * correctly.
+ */
+void i2400m_net_rx(struct i2400m *i2400m, struct sk_buff *skb_rx,
+ unsigned i, const void *buf, int buf_len)
+{
+ struct net_device *net_dev = i2400m->wimax_dev.net_dev;
+ struct device *dev = i2400m_dev(i2400m);
+ struct sk_buff *skb;
+
+ d_fnstart(2, dev, "(i2400m %p buf %p buf_len %d)\n",
+ i2400m, buf, buf_len);
+ if (i) {
+ skb = skb_get(skb_rx);
+ d_printf(2, dev, "RX: reusing first payload skb %p\n", skb);
+ skb_pull(skb, buf - (void *) skb->data);
+ skb_trim(skb, (void *) skb_end_pointer(skb) - buf);
+ } else {
+ /* Yes, this is bad -- a lot of overhead -- see
+ * comments at the top of the file */
+ skb = __netdev_alloc_skb(net_dev, buf_len, GFP_KERNEL);
+ if (skb == NULL) {
+ dev_err(dev, "NETRX: no memory to realloc skb\n");
+ net_dev->stats.rx_dropped++;
+ goto error_skb_realloc;
+ }
+ memcpy(skb_put(skb, buf_len), buf, buf_len);
+ }
+ i2400m_rx_fake_eth_header(i2400m->wimax_dev.net_dev,
+ skb->data - ETH_HLEN);
+ skb_set_mac_header(skb, -ETH_HLEN);
+ skb->dev = i2400m->wimax_dev.net_dev;
+ skb->protocol = htons(ETH_P_IP);
+ net_dev->stats.rx_packets++;
+ net_dev->stats.rx_bytes += buf_len;
+ d_printf(3, dev, "NETRX: receiving %d bytes to network stack\n",
+ buf_len);
+ d_dump(4, dev, buf, buf_len);
+ netif_rx_ni(skb); /* see notes in function header */
+error_skb_realloc:
+ d_fnend(2, dev, "(i2400m %p buf %p buf_len %d) = void\n",
+ i2400m, buf, buf_len);
+}
+
+
+/**
+ * i2400m_netdev_setup - Set up @net_dev's i2400m private data
+ *
+ * Called by alloc_netdev()
+ */
+void i2400m_netdev_setup(struct net_device *net_dev)
+{
+ d_fnstart(3, NULL, "(net_dev %p)\n", net_dev);
+ ether_setup(net_dev);
+ net_dev->mtu = I2400M_MAX_MTU;
+ net_dev->tx_queue_len = I2400M_TX_QLEN;
+ net_dev->features =
+ NETIF_F_VLAN_CHALLENGED
+ | NETIF_F_HIGHDMA;
+ net_dev->flags =
+		IFF_NOARP		/* i2400m is a pure IP device */
+ & (~IFF_BROADCAST /* i2400m is P2P */
+ & ~IFF_MULTICAST);
+ net_dev->watchdog_timeo = I2400M_TX_TIMEOUT;
+ net_dev->open = i2400m_open;
+ net_dev->stop = i2400m_stop;
+ net_dev->hard_start_xmit = i2400m_hard_start_xmit;
+ net_dev->change_mtu = i2400m_change_mtu;
+ net_dev->tx_timeout = i2400m_tx_timeout;
+ d_fnend(3, NULL, "(net_dev %p) = void\n", net_dev);
+}
+EXPORT_SYMBOL_GPL(i2400m_netdev_setup);
+
diff --git a/drivers/net/wimax/i2400m/op-rfkill.c b/drivers/net/wimax/i2400m/op-rfkill.c
new file mode 100644
index 00000000000..487ec58cea4
--- /dev/null
+++ b/drivers/net/wimax/i2400m/op-rfkill.c
@@ -0,0 +1,207 @@
+/*
+ * Intel Wireless WiMAX Connection 2400m
+ * Implement backend for the WiMAX stack rfkill support
+ *
+ *
+ * Copyright (C) 2007-2008 Intel Corporation
+ * Inaky Perez-Gonzalez
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * The WiMAX kernel stack integrates into RF-Kill and keeps the
+ * switches' status. We just need to:
+ *
+ * - report changes in the HW RF Kill switch [with
+ *   wimax_report_rfkill_{sw,hw}(), which happens when we detect those
+ *   indications coming through hardware reports]. We also do it on
+ *   initialization to let the stack know the initial HW state.
+ *
+ * - implement indications from the stack to change the SW RF Kill
+ * switch (coming from sysfs, the wimax stack or user space).
+ */
+#include "i2400m.h"
+#include <linux/wimax/i2400m.h>
+
+
+
+#define D_SUBMODULE rfkill
+#include "debug-levels.h"
+
+/*
+ * Return true if the i2400m radio is in the requested wimax_rf_state state
+ *
+ */
+static
+int i2400m_radio_is(struct i2400m *i2400m, enum wimax_rf_state state)
+{
+ if (state == WIMAX_RF_OFF)
+ return i2400m->state == I2400M_SS_RF_OFF
+ || i2400m->state == I2400M_SS_RF_SHUTDOWN;
+ else if (state == WIMAX_RF_ON)
+ /* state == WIMAX_RF_ON */
+ return i2400m->state != I2400M_SS_RF_OFF
+ && i2400m->state != I2400M_SS_RF_SHUTDOWN;
+ else
+ BUG();
+}
+
+
+/*
+ * WiMAX stack operation: implement SW RFKill toggling
+ *
+ * @wimax_dev: device descriptor
+ * @state: software RF Kill state to set (WIMAX_RF_ON or WIMAX_RF_OFF)
+ *
+ * The WiMAX stack calls this function when user space requests a
+ * change of the software RF-Kill switch status.
+ *
+ * This function will set the device's software RF-Kill switch state to
+ * match what is requested.
+ *
+ * NOTE: the i2400m has a strict state machine; we can only set the
+ *     RF-Kill switch when it is on, the HW RF-Kill is on and the
+ *     device is initialized. So we ignore errors stemming from not
+ *     being in the right state (-EILSEQ).
+ */
+int i2400m_op_rfkill_sw_toggle(struct wimax_dev *wimax_dev,
+ enum wimax_rf_state state)
+{
+ int result;
+ struct i2400m *i2400m = wimax_dev_to_i2400m(wimax_dev);
+ struct device *dev = i2400m_dev(i2400m);
+ struct sk_buff *ack_skb;
+ struct {
+ struct i2400m_l3l4_hdr hdr;
+ struct i2400m_tlv_rf_operation sw_rf;
+ } __attribute__((packed)) *cmd;
+ char strerr[32];
+
+ d_fnstart(4, dev, "(wimax_dev %p state %d)\n", wimax_dev, state);
+
+ result = -ENOMEM;
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (cmd == NULL)
+ goto error_alloc;
+ cmd->hdr.type = cpu_to_le16(I2400M_MT_CMD_RF_CONTROL);
+ cmd->hdr.length = sizeof(cmd->sw_rf);
+ cmd->hdr.version = cpu_to_le16(I2400M_L3L4_VERSION);
+ cmd->sw_rf.hdr.type = cpu_to_le16(I2400M_TLV_RF_OPERATION);
+ cmd->sw_rf.hdr.length = cpu_to_le16(sizeof(cmd->sw_rf.status));
+ switch (state) {
+ case WIMAX_RF_OFF: /* RFKILL ON, radio OFF */
+ cmd->sw_rf.status = cpu_to_le32(2);
+ break;
+ case WIMAX_RF_ON: /* RFKILL OFF, radio ON */
+ cmd->sw_rf.status = cpu_to_le32(1);
+ break;
+ default:
+ BUG();
+ }
+
+ ack_skb = i2400m_msg_to_dev(i2400m, cmd, sizeof(*cmd));
+ result = PTR_ERR(ack_skb);
+ if (IS_ERR(ack_skb)) {
+ dev_err(dev, "Failed to issue 'RF Control' command: %d\n",
+ result);
+ goto error_msg_to_dev;
+ }
+ result = i2400m_msg_check_status(wimax_msg_data(ack_skb),
+ strerr, sizeof(strerr));
+ if (result < 0) {
+ dev_err(dev, "'RF Control' (0x%04x) command failed: %d - %s\n",
+ I2400M_MT_CMD_RF_CONTROL, result, strerr);
+ goto error_cmd;
+ }
+
+ /* Now we wait for the state to change to RADIO_OFF or RADIO_ON */
+ result = wait_event_timeout(
+ i2400m->state_wq, i2400m_radio_is(i2400m, state),
+ 5 * HZ);
+ if (result == 0)
+ result = -ETIMEDOUT;
+ if (result < 0)
+ dev_err(dev, "Error waiting for device to toggle RF state: "
+ "%d\n", result);
+ result = 0;
+error_cmd:
+ kfree_skb(ack_skb);
+error_msg_to_dev:
+error_alloc:
+ d_fnend(4, dev, "(wimax_dev %p state %d) = %d\n",
+ wimax_dev, state, result);
+ return result;
+}
+
+
+/*
+ * Inform the WiMAX stack of changes in the RF Kill switches reported
+ * by the device
+ *
+ * @i2400m: device descriptor
+ * @rfss: TLV for RF Switches status; already validated
+ *
+ * NOTE: the reports on RF switch status cannot be trusted
+ * or used until the device is in a state of RADIO_OFF
+ * or greater.
+ */
+void i2400m_report_tlv_rf_switches_status(
+ struct i2400m *i2400m,
+ const struct i2400m_tlv_rf_switches_status *rfss)
+{
+ struct device *dev = i2400m_dev(i2400m);
+ enum i2400m_rf_switch_status hw, sw;
+ enum wimax_st wimax_state;
+
+ sw = le32_to_cpu(rfss->sw_rf_switch);
+ hw = le32_to_cpu(rfss->hw_rf_switch);
+
+ d_fnstart(3, dev, "(i2400m %p rfss %p [hw %u sw %u])\n",
+ i2400m, rfss, hw, sw);
+	/* We only process RF switch events when the device has been
+	 * fully initialized */
+ wimax_state = wimax_state_get(&i2400m->wimax_dev);
+ if (wimax_state < WIMAX_ST_RADIO_OFF) {
+ d_printf(3, dev, "ignoring RF switches report, state %u\n",
+ wimax_state);
+ goto out;
+ }
+ switch (sw) {
+ case I2400M_RF_SWITCH_ON: /* RF Kill disabled (radio on) */
+ wimax_report_rfkill_sw(&i2400m->wimax_dev, WIMAX_RF_ON);
+ break;
+ case I2400M_RF_SWITCH_OFF: /* RF Kill enabled (radio off) */
+ wimax_report_rfkill_sw(&i2400m->wimax_dev, WIMAX_RF_OFF);
+ break;
+ default:
+ dev_err(dev, "HW BUG? Unknown RF SW state 0x%x\n", sw);
+ }
+
+ switch (hw) {
+ case I2400M_RF_SWITCH_ON: /* RF Kill disabled (radio on) */
+ wimax_report_rfkill_hw(&i2400m->wimax_dev, WIMAX_RF_ON);
+ break;
+ case I2400M_RF_SWITCH_OFF: /* RF Kill enabled (radio off) */
+ wimax_report_rfkill_hw(&i2400m->wimax_dev, WIMAX_RF_OFF);
+ break;
+ default:
+ dev_err(dev, "HW BUG? Unknown RF HW state 0x%x\n", hw);
+ }
+out:
+ d_fnend(3, dev, "(i2400m %p rfss %p [hw %u sw %u]) = void\n",
+ i2400m, rfss, hw, sw);
+}
diff --git a/drivers/net/wimax/i2400m/rx.c b/drivers/net/wimax/i2400m/rx.c
new file mode 100644
index 00000000000..6922022710a
--- /dev/null
+++ b/drivers/net/wimax/i2400m/rx.c
@@ -0,0 +1,534 @@
+/*
+ * Intel Wireless WiMAX Connection 2400m
+ * Handle incoming traffic and deliver it to the control or data planes
+ *
+ *
+ * Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Intel Corporation
+ * Yanir Lubetkin
+ * - Initial implementation
+ * Inaky Perez-Gonzalez
+ * - Use skb_clone(), break up processing in chunks
+ * - Split transport/device specific
+ * - Make buffer size dynamic to exert less memory pressure
+ *
+ *
+ * This handles the RX path.
+ *
+ * We receive an RX message from the bus-specific driver, which
+ * contains one or more payloads that have potentially different
+ * destinations (data or control paths).
+ *
+ * So we just take that payload from the transport specific code in
+ * the form of an skb, break it up in chunks (a cloned skb each in the
+ * case of network packets) and pass it to netdev or to the
+ * command/ack handler (and from there to the WiMAX stack).
+ *
+ * PROTOCOL FORMAT
+ *
+ * The format of the buffer is:
+ *
+ * HEADER (struct i2400m_msg_hdr)
+ * PAYLOAD DESCRIPTOR 0 (struct i2400m_pld)
+ * PAYLOAD DESCRIPTOR 1
+ * ...
+ * PAYLOAD DESCRIPTOR N
+ * PAYLOAD 0 (raw bytes)
+ * PAYLOAD 1
+ * ...
+ * PAYLOAD N
+ *
+ * See tx.c for a deeper description on alignment requirements and
+ * other fun facts of it.
+ *
+ * ROADMAP
+ *
+ * i2400m_rx
+ * i2400m_rx_msg_hdr_check
+ * i2400m_rx_pl_descr_check
+ * i2400m_rx_payload
+ * i2400m_net_rx
+ * i2400m_rx_ctl
+ * i2400m_msg_size_check
+ * i2400m_report_hook_work [in a workqueue]
+ * i2400m_report_hook
+ * wimax_msg_to_user
+ * i2400m_rx_ctl_ack
+ * wimax_msg_to_user_alloc
+ * i2400m_rx_trace
+ * i2400m_msg_size_check
+ * wimax_msg
+ */
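+
+/*
+ * For illustration, given the format above, the first payload's data
+ * starts at offset (this matches the walk done in i2400m_rx() below):
+ *
+ *	pl_itr = ALIGN(sizeof(struct i2400m_msg_hdr)
+ *		       + num_pls * sizeof(struct i2400m_pld),
+ *		       I2400M_PL_PAD);
+ *
+ * and each payload is padded up to the next I2400M_PL_PAD boundary.
+ */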
+#include <linux/kernel.h>
+#include <linux/if_arp.h>
+#include <linux/netdevice.h>
+#include <linux/workqueue.h>
+#include "i2400m.h"
+
+
+#define D_SUBMODULE rx
+#include "debug-levels.h"
+
+struct i2400m_report_hook_args {
+ struct sk_buff *skb_rx;
+ const struct i2400m_l3l4_hdr *l3l4_hdr;
+ size_t size;
+};
+
+
+/*
+ * Execute i2400m_report_hook in a workqueue
+ *
+ * Unpacks arguments from the deferred call, executes it and then
+ * drops the references.
+ *
+ * Obvious NOTE: References are needed because we are a separate
+ * thread; otherwise the buffer changes under us because it is
+ * released by the original caller.
+ */
+static
+void i2400m_report_hook_work(struct work_struct *ws)
+{
+ struct i2400m_work *iw =
+ container_of(ws, struct i2400m_work, ws);
+ struct i2400m_report_hook_args *args = (void *) iw->pl;
+ i2400m_report_hook(iw->i2400m, args->l3l4_hdr, args->size);
+ kfree_skb(args->skb_rx);
+ i2400m_put(iw->i2400m);
+ kfree(iw);
+}
+
+
+/*
+ * Process an ack to a command
+ *
+ * @i2400m: device descriptor
+ * @payload: pointer to message
+ * @size: size of the message
+ *
+ * Pass the acknowledgment (in an skb) to the thread that is waiting
+ * for it in i2400m->msg_completion.
+ *
+ * We need to coordinate properly with the thread waiting for the
+ * ack. Check if it is waiting or if it is gone. We drop the spinlock
+ * to avoid allocating in atomic context (yeah, we could use
+ * GFP_ATOMIC, but this is not so speed critical).
+ */
+static
+void i2400m_rx_ctl_ack(struct i2400m *i2400m,
+ const void *payload, size_t size)
+{
+ struct device *dev = i2400m_dev(i2400m);
+ struct wimax_dev *wimax_dev = &i2400m->wimax_dev;
+ unsigned long flags;
+ struct sk_buff *ack_skb;
+
+ /* Anyone waiting for an answer? */
+ spin_lock_irqsave(&i2400m->rx_lock, flags);
+ if (i2400m->ack_skb != ERR_PTR(-EINPROGRESS)) {
+ dev_err(dev, "Huh? reply to command with no waiters\n");
+ goto error_no_waiter;
+ }
+ spin_unlock_irqrestore(&i2400m->rx_lock, flags);
+
+ ack_skb = wimax_msg_alloc(wimax_dev, NULL, payload, size, GFP_KERNEL);
+
+ /* Check waiter didn't time out waiting for the answer... */
+ spin_lock_irqsave(&i2400m->rx_lock, flags);
+ if (i2400m->ack_skb != ERR_PTR(-EINPROGRESS)) {
+ d_printf(1, dev, "Huh? waiter for command reply cancelled\n");
+ goto error_waiter_cancelled;
+ }
+ if (ack_skb == NULL) {
+ dev_err(dev, "CMD/GET/SET ack: cannot allocate SKB\n");
+ i2400m->ack_skb = ERR_PTR(-ENOMEM);
+ } else
+ i2400m->ack_skb = ack_skb;
+ spin_unlock_irqrestore(&i2400m->rx_lock, flags);
+ complete(&i2400m->msg_completion);
+ return;
+
+error_waiter_cancelled:
+ if (ack_skb)
+ kfree_skb(ack_skb);
+error_no_waiter:
+ spin_unlock_irqrestore(&i2400m->rx_lock, flags);
+ return;
+}
+
+
+/*
+ * Receive and process a control payload
+ *
+ * @i2400m: device descriptor
+ * @skb_rx: skb that contains the payload (for reference counting)
+ * @payload: pointer to message
+ * @size: size of the message
+ *
+ * There are two types of control RX messages: reports (asynchronous,
+ * like your everyday interrupts) and 'acks' (responses to a command,
+ * get or set request).
+ *
+ * If it is a report, we run hooks on it (to extract information for
+ * things we need to do in the driver) and then pass it over to the
+ * WiMAX stack to send it to user space.
+ *
+ * NOTE: report processing is done in a workqueue specific to the
+ * generic driver, to avoid deadlocks in the system.
+ *
+ * If it is not a report, it is an ack to a previously executed
+ * command, set or get, so wake up whoever is waiting for it from
+ * i2400m_msg_to_dev(). i2400m_rx_ctl_ack() takes care of that.
+ *
+ * Note that the sizes we pass to other functions from here are the
+ * sizes of the _l3l4_hdr + payload, not full buffer sizes, as we have
+ * verified in _msg_size_check() that they are congruent.
+ *
+ * For reports: We can't clone the original skb where the data is
+ * because we need to send this up via netlink; netlink has to add
+ * headers and we can't overwrite what's preceding the payload...as
+ * it is another message. So we just dup them.
+ */
+static
+void i2400m_rx_ctl(struct i2400m *i2400m, struct sk_buff *skb_rx,
+ const void *payload, size_t size)
+{
+ int result;
+ struct device *dev = i2400m_dev(i2400m);
+ const struct i2400m_l3l4_hdr *l3l4_hdr = payload;
+ unsigned msg_type;
+
+ result = i2400m_msg_size_check(i2400m, l3l4_hdr, size);
+ if (result < 0) {
+ dev_err(dev, "HW BUG? device sent a bad message: %d\n",
+ result);
+ goto error_check;
+ }
+ msg_type = le16_to_cpu(l3l4_hdr->type);
+ d_printf(1, dev, "%s 0x%04x: %zu bytes\n",
+ msg_type & I2400M_MT_REPORT_MASK ? "REPORT" : "CMD/SET/GET",
+ msg_type, size);
+ d_dump(2, dev, l3l4_hdr, size);
+ if (msg_type & I2400M_MT_REPORT_MASK) {
+		/* These hooks have to be run serialized; as well, the
+ * handling might force the execution of commands, and
+ * that might cause reentrancy issues with
+ * bus-specific subdrivers and workqueues. So we run
+ * it in a separate workqueue. */
+ struct i2400m_report_hook_args args = {
+ .skb_rx = skb_rx,
+ .l3l4_hdr = l3l4_hdr,
+ .size = size
+ };
+ if (unlikely(i2400m->ready == 0)) /* only send if up */
+ return;
+ skb_get(skb_rx);
+ i2400m_queue_work(i2400m, i2400m_report_hook_work,
+ GFP_KERNEL, &args, sizeof(args));
+ result = wimax_msg(&i2400m->wimax_dev, NULL, l3l4_hdr, size,
+ GFP_KERNEL);
+ if (result < 0)
+ dev_err(dev, "error sending report to userspace: %d\n",
+ result);
+ } else /* an ack to a CMD, GET or SET */
+ i2400m_rx_ctl_ack(i2400m, payload, size);
+error_check:
+ return;
+}
+
+
+
+
+/*
+ * Receive and send up a trace
+ *
+ * @i2400m: device descriptor
+ * @skb_rx: skb that contains the trace (for reference counting)
+ * @payload: pointer to trace message inside the skb
+ * @size: size of the message
+ *
+ * The i2400m might produce trace information (diagnostics) and we
+ * send them through a different kernel-to-user pipe (to avoid
+ * clogging it).
+ *
+ * As in i2400m_rx_ctl(), we can't clone the original skb where the
+ * data is because we need to send this up via netlink; netlink has to
+ * add headers and we can't overwrite what's preceding the
+ * payload...as it is another message. So we just dup them.
+ */
+static
+void i2400m_rx_trace(struct i2400m *i2400m,
+ const void *payload, size_t size)
+{
+ int result;
+ struct device *dev = i2400m_dev(i2400m);
+ struct wimax_dev *wimax_dev = &i2400m->wimax_dev;
+ const struct i2400m_l3l4_hdr *l3l4_hdr = payload;
+ unsigned msg_type;
+
+ result = i2400m_msg_size_check(i2400m, l3l4_hdr, size);
+ if (result < 0) {
+ dev_err(dev, "HW BUG? device sent a bad trace message: %d\n",
+ result);
+ goto error_check;
+ }
+ msg_type = le16_to_cpu(l3l4_hdr->type);
+ d_printf(1, dev, "Trace %s 0x%04x: %zu bytes\n",
+ msg_type & I2400M_MT_REPORT_MASK ? "REPORT" : "CMD/SET/GET",
+ msg_type, size);
+ d_dump(2, dev, l3l4_hdr, size);
+ if (unlikely(i2400m->ready == 0)) /* only send if up */
+ return;
+ result = wimax_msg(wimax_dev, "trace", l3l4_hdr, size, GFP_KERNEL);
+ if (result < 0)
+ dev_err(dev, "error sending trace to userspace: %d\n",
+ result);
+error_check:
+ return;
+}
+
+
+/*
+ * Act on a received payload
+ *
+ * @i2400m: device instance
+ * @skb_rx: skb where the transaction was received
+ * @single: 1 if there is only one payload, 0 otherwise
+ * @pld: payload descriptor
+ * @payload: payload data
+ *
+ * Upon reception of a payload, look at its guts in the payload
+ * descriptor and decide what to do with it.
+ */
+static
+void i2400m_rx_payload(struct i2400m *i2400m, struct sk_buff *skb_rx,
+ unsigned single, const struct i2400m_pld *pld,
+ const void *payload)
+{
+ struct device *dev = i2400m_dev(i2400m);
+ size_t pl_size = i2400m_pld_size(pld);
+ enum i2400m_pt pl_type = i2400m_pld_type(pld);
+
+ switch (pl_type) {
+ case I2400M_PT_DATA:
+ d_printf(3, dev, "RX: data payload %zu bytes\n", pl_size);
+ i2400m_net_rx(i2400m, skb_rx, single, payload, pl_size);
+ break;
+ case I2400M_PT_CTRL:
+ i2400m_rx_ctl(i2400m, skb_rx, payload, pl_size);
+ break;
+ case I2400M_PT_TRACE:
+ i2400m_rx_trace(i2400m, payload, pl_size);
+ break;
+ default: /* Anything else shouldn't come to the host */
+ if (printk_ratelimit())
+ dev_err(dev, "RX: HW BUG? unexpected payload type %u\n",
+ pl_type);
+ }
+}
+
+
+/*
+ * Check a received transaction's message header
+ *
+ * @i2400m: device descriptor
+ * @msg_hdr: message header
+ * @buf_size: size of the received buffer
+ *
+ * Check that the declarations done by a RX buffer message header are
+ * sane and consistent with the amount of data that was received.
+ */
+static
+int i2400m_rx_msg_hdr_check(struct i2400m *i2400m,
+ const struct i2400m_msg_hdr *msg_hdr,
+ size_t buf_size)
+{
+ int result = -EIO;
+ struct device *dev = i2400m_dev(i2400m);
+ if (buf_size < sizeof(*msg_hdr)) {
+ dev_err(dev, "RX: HW BUG? message with short header (%zu "
+ "vs %zu bytes expected)\n", buf_size, sizeof(*msg_hdr));
+ goto error;
+ }
+ if (msg_hdr->barker != cpu_to_le32(I2400M_D2H_MSG_BARKER)) {
+ dev_err(dev, "RX: HW BUG? message received with unknown "
+ "barker 0x%08x (buf_size %zu bytes)\n",
+ le32_to_cpu(msg_hdr->barker), buf_size);
+ goto error;
+ }
+ if (msg_hdr->num_pls == 0) {
+ dev_err(dev, "RX: HW BUG? zero payload packets in message\n");
+ goto error;
+ }
+ if (le16_to_cpu(msg_hdr->num_pls) > I2400M_MAX_PLS_IN_MSG) {
+ dev_err(dev, "RX: HW BUG? message contains more payload "
+ "than maximum; ignoring.\n");
+ goto error;
+ }
+ result = 0;
+error:
+ return result;
+}
+
+
+/*
+ * Check a payload descriptor against the received data
+ *
+ * @i2400m: device descriptor
+ * @pld: payload descriptor
+ * @pl_itr: offset (in bytes) in the received buffer the payload is
+ * located
+ * @buf_size: size of the received buffer
+ *
+ * Given a payload descriptor (part of a RX buffer), check it is sane
+ * and that the data it declares fits in the buffer.
+ */
+static
+int i2400m_rx_pl_descr_check(struct i2400m *i2400m,
+ const struct i2400m_pld *pld,
+ size_t pl_itr, size_t buf_size)
+{
+ int result = -EIO;
+ struct device *dev = i2400m_dev(i2400m);
+ size_t pl_size = i2400m_pld_size(pld);
+ enum i2400m_pt pl_type = i2400m_pld_type(pld);
+
+ if (pl_size > i2400m->bus_pl_size_max) {
+ dev_err(dev, "RX: HW BUG? payload @%zu: size %zu is "
+ "bigger than maximum %zu; ignoring message\n",
+ pl_itr, pl_size, i2400m->bus_pl_size_max);
+ goto error;
+ }
+ if (pl_itr + pl_size > buf_size) { /* enough? */
+ dev_err(dev, "RX: HW BUG? payload @%zu: size %zu "
+ "goes beyond the received buffer "
+ "size (%zu bytes); ignoring message\n",
+ pl_itr, pl_size, buf_size);
+ goto error;
+ }
+ if (pl_type >= I2400M_PT_ILLEGAL) {
+ dev_err(dev, "RX: HW BUG? illegal payload type %u; "
+ "ignoring message\n", pl_type);
+ goto error;
+ }
+ result = 0;
+error:
+ return result;
+}
+
+
+/**
+ * i2400m_rx - Receive a buffer of data from the device
+ *
+ * @i2400m: device descriptor
+ * @skb: skbuff where the data has been received
+ *
+ * Parse in a buffer of data that contains an RX message sent from the
+ * device. See the file header for the format. Run all checks on the
+ * buffer header, then run over each payload's descriptors, verify
+ * their consistency and act on each payload's contents. If
+ * everything is successful, update the device's statistics.
+ *
+ * Note: You need to set the skb to contain only the length of the
+ * received buffer; for that, use skb_trim(skb, RECEIVED_SIZE).
+ *
+ * Returns:
+ *
+ * 0 if ok, < 0 errno on error
+ *
+ * If ok, this function now owns the skb and the caller DOESN'T have
+ * to run kfree_skb() on it. However, on error, the caller still owns
+ * the skb and it is responsible for releasing it.
+ */
+int i2400m_rx(struct i2400m *i2400m, struct sk_buff *skb)
+{
+ int i, result;
+ struct device *dev = i2400m_dev(i2400m);
+ const struct i2400m_msg_hdr *msg_hdr;
+ size_t pl_itr, pl_size, skb_len;
+ unsigned long flags;
+ unsigned num_pls;
+
+ skb_len = skb->len;
+ d_fnstart(4, dev, "(i2400m %p skb %p [size %zu])\n",
+ i2400m, skb, skb_len);
+ result = -EIO;
+ msg_hdr = (void *) skb->data;
+ result = i2400m_rx_msg_hdr_check(i2400m, msg_hdr, skb->len);
+ if (result < 0)
+ goto error_msg_hdr_check;
+ result = -EIO;
+ num_pls = le16_to_cpu(msg_hdr->num_pls);
+ pl_itr = sizeof(*msg_hdr) + /* Check payload descriptor(s) */
+ num_pls * sizeof(msg_hdr->pld[0]);
+ pl_itr = ALIGN(pl_itr, I2400M_PL_PAD);
+ if (pl_itr > skb->len) { /* got all the payload descriptors? */
+ dev_err(dev, "RX: HW BUG? message too short (%u bytes) for "
+ "%u payload descriptors (%zu each, total %zu)\n",
+ skb->len, num_pls, sizeof(msg_hdr->pld[0]), pl_itr);
+ goto error_pl_descr_short;
+ }
+	/* Walk each payload -- check we really got it */
+ for (i = 0; i < num_pls; i++) {
+ /* work around old gcc warnings */
+ pl_size = i2400m_pld_size(&msg_hdr->pld[i]);
+ result = i2400m_rx_pl_descr_check(i2400m, &msg_hdr->pld[i],
+ pl_itr, skb->len);
+ if (result < 0)
+ goto error_pl_descr_check;
+ i2400m_rx_payload(i2400m, skb, num_pls == 1, &msg_hdr->pld[i],
+ skb->data + pl_itr);
+ pl_itr += ALIGN(pl_size, I2400M_PL_PAD);
+ cond_resched(); /* Don't monopolize */
+ }
+ kfree_skb(skb);
+ /* Update device statistics */
+ spin_lock_irqsave(&i2400m->rx_lock, flags);
+ i2400m->rx_pl_num += i;
+ if (i > i2400m->rx_pl_max)
+ i2400m->rx_pl_max = i;
+ if (i < i2400m->rx_pl_min)
+ i2400m->rx_pl_min = i;
+ i2400m->rx_num++;
+	i2400m->rx_size_acc += skb_len;
+	if (skb_len < i2400m->rx_size_min)
+		i2400m->rx_size_min = skb_len;
+	if (skb_len > i2400m->rx_size_max)
+		i2400m->rx_size_max = skb_len;
+ spin_unlock_irqrestore(&i2400m->rx_lock, flags);
+error_pl_descr_check:
+error_pl_descr_short:
+error_msg_hdr_check:
+ d_fnend(4, dev, "(i2400m %p skb %p [size %zu]) = %d\n",
+ i2400m, skb, skb_len, result);
+ return result;
+}
+EXPORT_SYMBOL_GPL(i2400m_rx);
diff --git a/drivers/net/wimax/i2400m/sdio-debug-levels.h b/drivers/net/wimax/i2400m/sdio-debug-levels.h
new file mode 100644
index 00000000000..c5199874130
--- /dev/null
+++ b/drivers/net/wimax/i2400m/sdio-debug-levels.h
@@ -0,0 +1,22 @@
+/*
+ * debug levels control file for the i2400m SDIO module
+ */
+#ifndef __debug_levels__h__
+#define __debug_levels__h__
+
+/* Maximum compile and run time debug level for all submodules */
+#define D_MODULENAME i2400m_sdio
+#define D_MASTER CONFIG_WIMAX_I2400M_DEBUG_LEVEL
+
+#include <linux/wimax/debug.h>
+
+/* List of all the enabled modules */
+enum d_module {
+ D_SUBMODULE_DECLARE(main),
+ D_SUBMODULE_DECLARE(tx),
+ D_SUBMODULE_DECLARE(rx),
+ D_SUBMODULE_DECLARE(fw)
+};
+
+
+#endif /* #ifndef __debug_levels__h__ */
diff --git a/drivers/net/wimax/i2400m/sdio-fw.c b/drivers/net/wimax/i2400m/sdio-fw.c
new file mode 100644
index 00000000000..3487205d8f5
--- /dev/null
+++ b/drivers/net/wimax/i2400m/sdio-fw.c
@@ -0,0 +1,224 @@
+/*
+ * Intel Wireless WiMAX Connection 2400m
+ * Firmware uploader's SDIO specifics
+ *
+ *
+ * Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Intel Corporation
+ * Yanir Lubetkin
+ * Inaky Perez-Gonzalez
+ * - Initial implementation
+ *
+ * Inaky Perez-Gonzalez
+ * - Bus generic/specific split for USB
+ *
+ * Dirk Brandewie
+ * - Initial implementation for SDIO
+ *
+ * Inaky Perez-Gonzalez
+ * - SDIO rehash for changes in the bus-driver model
+ *
+ * THE PROCEDURE
+ *
+ * See fw.c for the generic description of this procedure.
+ *
+ * This file implements only the SDIO specifics. It boils down to how
+ * to send a command and waiting for an acknowledgement from the
+ * device. We do polled reads.
+ *
+ * COMMAND EXECUTION
+ *
+ * The generic firmware upload code will call i2400m_bus_bm_cmd_send()
+ * to send commands.
+ *
+ * The SDIO device expects things in 256-byte blocks, so we pad the
+ * command, compute the checksum (if needed) and pass it to SDIO.
+ *
+ * ACK RECEPTION
+ *
+ * This works in polling mode -- the fw loader says when to wait for
+ * data and for that it calls i2400ms_bus_bm_wait_for_ack().
+ *
+ * This will poll the device for data until it is received. We need to
+ * receive at least as many bytes as were asked for (although it'll
+ * always be a multiple of 256 bytes).
+ */
+#include <linux/mmc/sdio_func.h>
+#include "i2400m-sdio.h"
+
+
+#define D_SUBMODULE fw
+#include "sdio-debug-levels.h"
+
+/*
+ * Send a boot-mode command to the SDIO function
+ *
+ * We use a bounce buffer (i2400m->bm_cmd_buf) because we need to
+ * touch the header if the RAW flag is not set.
+ *
+ * @flags: pass thru from i2400m_bm_cmd()
+ * @return: cmd_size if ok, < 0 errno code on error.
+ *
+ * Note the command is padded to the SDIO block size for the device.
+ */
+ssize_t i2400ms_bus_bm_cmd_send(struct i2400m *i2400m,
+ const struct i2400m_bootrom_header *_cmd,
+ size_t cmd_size, int flags)
+{
+ ssize_t result;
+ struct device *dev = i2400m_dev(i2400m);
+ struct i2400ms *i2400ms = container_of(i2400m, struct i2400ms, i2400m);
+ int opcode = _cmd == NULL ? -1 : i2400m_brh_get_opcode(_cmd);
+ struct i2400m_bootrom_header *cmd;
+ /* SDIO restriction */
+ size_t cmd_size_a = ALIGN(cmd_size, I2400MS_BLK_SIZE);
+
+ d_fnstart(5, dev, "(i2400m %p cmd %p size %zu)\n",
+ i2400m, _cmd, cmd_size);
+ result = -E2BIG;
+ if (cmd_size > I2400M_BM_CMD_BUF_SIZE)
+ goto error_too_big;
+
+ memcpy(i2400m->bm_cmd_buf, _cmd, cmd_size); /* Prep command */
+ cmd = i2400m->bm_cmd_buf;
+ if (cmd_size_a > cmd_size) /* Zero pad space */
+ memset(i2400m->bm_cmd_buf + cmd_size, 0, cmd_size_a - cmd_size);
+ if ((flags & I2400M_BM_CMD_RAW) == 0) {
+ if (WARN_ON(i2400m_brh_get_response_required(cmd) == 0))
+ dev_warn(dev, "SW BUG: response_required == 0\n");
+ i2400m_bm_cmd_prepare(cmd);
+ }
+ d_printf(4, dev, "BM cmd %d: %zu bytes (%zu padded)\n",
+ opcode, cmd_size, cmd_size_a);
+ d_dump(5, dev, cmd, cmd_size);
+
+ sdio_claim_host(i2400ms->func); /* Send & check */
+ result = sdio_memcpy_toio(i2400ms->func, I2400MS_DATA_ADDR,
+ i2400m->bm_cmd_buf, cmd_size_a);
+ sdio_release_host(i2400ms->func);
+ if (result < 0) {
+ dev_err(dev, "BM cmd %d: cannot send: %ld\n",
+ opcode, (long) result);
+ goto error_cmd_send;
+ }
+ result = cmd_size;
+error_cmd_send:
+error_too_big:
+ d_fnend(5, dev, "(i2400m %p cmd %p size %zu) = %d\n",
+ i2400m, _cmd, cmd_size, (int) result);
+ return result;
+}
+
+
+/*
+ * Read an ack from the device's boot-mode (polling)
+ *
+ * @i2400m:
+ * @_ack: pointer to where to store the read data
+ * @ack_size: how many bytes we should read
+ *
+ * Returns: < 0 errno code on error; otherwise, amount of received bytes.
+ *
+ * The ACK for a BM command is always at least sizeof(*ack) bytes, so
+ * check for that. We don't need to check for device reboots here.
+ *
+ * NOTE: we run our own timeout (2 s) on top of the SDIO transfer;
+ * this way we keep control over it, as there is no way that I know
+ * of to set an SDIO transaction timeout.
+ */
+ssize_t i2400ms_bus_bm_wait_for_ack(struct i2400m *i2400m,
+ struct i2400m_bootrom_header *ack,
+ size_t ack_size)
+{
+ int result;
+ ssize_t rx_size;
+ u64 timeout;
+ struct i2400ms *i2400ms = container_of(i2400m, struct i2400ms, i2400m);
+ struct sdio_func *func = i2400ms->func;
+ struct device *dev = &func->dev;
+
+ BUG_ON(sizeof(*ack) > ack_size);
+
+ d_fnstart(5, dev, "(i2400m %p ack %p size %zu)\n",
+ i2400m, ack, ack_size);
+
+ timeout = get_jiffies_64() + 2 * HZ;
+ sdio_claim_host(func);
+ while (1) {
+ if (time_after64(get_jiffies_64(), timeout)) {
+ rx_size = -ETIMEDOUT;
+ dev_err(dev, "timeout waiting for ack data\n");
+ goto error_timedout;
+ }
+
+		/* Find the RX size; check if it fits or not -- if it
+		 * doesn't fit, fail, as we have no way to dispose of
+		 * the extra data. */
+ rx_size = __i2400ms_rx_get_size(i2400ms);
+ if (rx_size < 0)
+ goto error_rx_get_size;
+		/* Check the advertised size fits */
+		if (rx_size < sizeof(*ack)) {
+			dev_err(dev, "HW BUG? received is too small (%zd vs "
+				"%zu needed)\n", rx_size, sizeof(*ack));
+			rx_size = -EIO;
+			goto error_too_small;
+		}
+		if (rx_size > I2400M_BM_ACK_BUF_SIZE) {
+			dev_err(dev, "SW BUG? BM_ACK_BUF is too small (%u vs "
+				"%zd needed)\n", I2400M_BM_ACK_BUF_SIZE,
+				rx_size);
+			rx_size = -ENOSPC;
+			goto error_too_small;
+		}
+
+ /* Read it */
+ result = sdio_memcpy_fromio(func, i2400m->bm_ack_buf,
+ I2400MS_DATA_ADDR, rx_size);
+ if (result == -ETIMEDOUT || result == -ETIME)
+ continue;
+ if (result < 0) {
+ dev_err(dev, "BM SDIO receive (%zu B) failed: %d\n",
+ rx_size, result);
+ goto error_read;
+ } else
+ break;
+ }
+ rx_size = min((ssize_t)ack_size, rx_size);
+ memcpy(ack, i2400m->bm_ack_buf, rx_size);
+error_read:
+error_too_small:
+error_rx_get_size:
+error_timedout:
+ sdio_release_host(func);
+ d_fnend(5, dev, "(i2400m %p ack %p size %zu) = %ld\n",
+ i2400m, ack, ack_size, (long) rx_size);
+ return rx_size;
+}
diff --git a/drivers/net/wimax/i2400m/sdio-rx.c b/drivers/net/wimax/i2400m/sdio-rx.c
new file mode 100644
index 00000000000..a3008b904f7
--- /dev/null
+++ b/drivers/net/wimax/i2400m/sdio-rx.c
@@ -0,0 +1,255 @@
+/*
+ * Intel Wireless WiMAX Connection 2400m
+ * SDIO RX handling
+ *
+ *
+ * Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Intel Corporation
+ * Dirk Brandewie
+ * - Initial implementation
+ *
+ *
+ * This handles the RX path on SDIO.
+ *
+ * The SDIO bus driver calls the "irq" routine when data is available.
+ * This is not a traditional interrupt routine since the SDIO bus
+ * driver calls us from its IRQ thread context. Because of this,
+ * sleeping in the SDIO RX IRQ routine is okay.
+ *
+ * From there on, we obtain the size of the data that is available,
+ * allocate an skb, copy it and then pass it to the generic driver's
+ * RX routine [i2400m_rx()].
+ *
+ * ROADMAP
+ *
+ * i2400ms_irq()
+ * i2400ms_rx()
+ * __i2400ms_rx_get_size()
+ * i2400m_rx()
+ *
+ * i2400ms_rx_setup()
+ *
+ * i2400ms_rx_release()
+ */
+#include <linux/workqueue.h>
+#include <linux/wait.h>
+#include <linux/skbuff.h>
+#include <linux/mmc/sdio.h>
+#include <linux/mmc/sdio_func.h>
+#include "i2400m-sdio.h"
+
+#define D_SUBMODULE rx
+#include "sdio-debug-levels.h"
+
+
+/*
+ * Read and return the amount of bytes available for RX
+ *
+ * The RX size has to be read as byte reads of three sequential
+ * locations, then glued together MSB first.
+ *
+ * sdio_readl() doesn't work.
+ */
+ssize_t __i2400ms_rx_get_size(struct i2400ms *i2400ms)
+{
+ int ret, cnt, val;
+ ssize_t rx_size;
+ unsigned xfer_size_addr;
+ struct sdio_func *func = i2400ms->func;
+ struct device *dev = &i2400ms->func->dev;
+
+ d_fnstart(7, dev, "(i2400ms %p)\n", i2400ms);
+ xfer_size_addr = I2400MS_INTR_GET_SIZE_ADDR;
+ rx_size = 0;
+ for (cnt = 0; cnt < 3; cnt++) {
+ val = sdio_readb(func, xfer_size_addr + cnt, &ret);
+ if (ret < 0) {
+ dev_err(dev, "RX: Can't read byte %d of RX size from "
+ "0x%08x: %d\n", cnt, xfer_size_addr + cnt, ret);
+ rx_size = ret;
+ goto error_read;
+ }
+ rx_size = rx_size << 8 | (val & 0xff);
+ }
+ d_printf(6, dev, "RX: rx_size is %ld\n", (long) rx_size);
+error_read:
+ d_fnend(7, dev, "(i2400ms %p) = %ld\n", i2400ms, (long) rx_size);
+ return rx_size;
+}
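For reference, this is a stand-alone sketch of the byte-glue done above: three byte-wide reads combined MSB first into a 24-bit size. The regs[] array stands in for the sdio_readb() calls and the values are made up.

#include <stdio.h>

int main(void)
{
	unsigned char regs[3] = { 0x00, 0x05, 0x20 };   /* made-up register values */
	unsigned long rx_size = 0;
	int cnt;

	for (cnt = 0; cnt < 3; cnt++)                   /* MSB first, as above */
		rx_size = rx_size << 8 | (regs[cnt] & 0xff);
	printf("rx_size = %lu\n", rx_size);             /* 0x000520 = 1312 */
	return 0;
}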
+
+
+/*
+ * Read data from the device (when in normal operation)
+ *
+ * Allocate an SKB of the right size, read the data in and then
+ * deliver it to the generic layer.
+ *
+ * We also check for a reboot barker. That means the device died and
+ * we have to reboot it.
+ */
+static
+void i2400ms_rx(struct i2400ms *i2400ms)
+{
+ int ret;
+ struct sdio_func *func = i2400ms->func;
+ struct device *dev = &func->dev;
+ struct i2400m *i2400m = &i2400ms->i2400m;
+ struct sk_buff *skb;
+ ssize_t rx_size;
+
+ d_fnstart(7, dev, "(i2400ms %p)\n", i2400ms);
+ rx_size = __i2400ms_rx_get_size(i2400ms);
+ if (rx_size < 0) {
+ ret = rx_size;
+ goto error_get_size;
+ }
+ ret = -ENOMEM;
+ skb = alloc_skb(rx_size, GFP_ATOMIC);
+ if (NULL == skb) {
+ dev_err(dev, "RX: unable to alloc skb\n");
+ goto error_alloc_skb;
+ }
+
+ ret = sdio_memcpy_fromio(func, skb->data,
+ I2400MS_DATA_ADDR, rx_size);
+ if (ret < 0) {
+ dev_err(dev, "RX: SDIO data read failed: %d\n", ret);
+ goto error_memcpy_fromio;
+ }
+ /* Check if device has reset */
+ if (!memcmp(skb->data, i2400m_NBOOT_BARKER,
+ sizeof(i2400m_NBOOT_BARKER))
+ || !memcmp(skb->data, i2400m_SBOOT_BARKER,
+ sizeof(i2400m_SBOOT_BARKER))) {
+ ret = i2400m_dev_reset_handle(i2400m);
+ kfree_skb(skb);
+ } else {
+ skb_put(skb, rx_size);
+ i2400m_rx(i2400m, skb);
+ }
+ d_fnend(7, dev, "(i2400ms %p) = void\n", i2400ms);
+ return;
+
+error_memcpy_fromio:
+ kfree_skb(skb);
+error_alloc_skb:
+error_get_size:
+ d_fnend(7, dev, "(i2400ms %p) = %d\n", i2400ms, ret);
+ return;
+}
+
+
+/*
+ * Process an interrupt from the SDIO card
+ *
+ * FIXME: need to process other events that are not just ready-to-read
+ *
+ * Checks there is data ready and then proceeds to read it.
+ */
+static
+void i2400ms_irq(struct sdio_func *func)
+{
+ int ret;
+ struct i2400ms *i2400ms = sdio_get_drvdata(func);
+ struct i2400m *i2400m = &i2400ms->i2400m;
+ struct device *dev = &func->dev;
+ int val;
+
+ d_fnstart(6, dev, "(i2400ms %p)\n", i2400ms);
+ val = sdio_readb(func, I2400MS_INTR_STATUS_ADDR, &ret);
+ if (ret < 0) {
+ dev_err(dev, "RX: Can't read interrupt status: %d\n", ret);
+ goto error_no_irq;
+ }
+ if (!val) {
+ dev_err(dev, "RX: BUG? got IRQ but no interrupt ready?\n");
+ goto error_no_irq;
+ }
+ sdio_writeb(func, 1, I2400MS_INTR_CLEAR_ADDR, &ret);
+ if (WARN_ON(i2400m->boot_mode != 0))
+ dev_err(dev, "RX: SW BUG? boot mode and IRQ is up?\n");
+ else
+ i2400ms_rx(i2400ms);
+error_no_irq:
+ d_fnend(6, dev, "(i2400ms %p) = void\n", i2400ms);
+ return;
+}
+
+
+/*
+ * Setup SDIO RX
+ *
+ * Hooks up the IRQ handler and then enables IRQs.
+ */
+int i2400ms_rx_setup(struct i2400ms *i2400ms)
+{
+ int result;
+ struct sdio_func *func = i2400ms->func;
+ struct device *dev = &func->dev;
+
+ d_fnstart(5, dev, "(i2400ms %p)\n", i2400ms);
+ sdio_claim_host(func);
+ result = sdio_claim_irq(func, i2400ms_irq);
+ if (result < 0) {
+ dev_err(dev, "Cannot claim IRQ: %d\n", result);
+ goto error_irq_claim;
+ }
+ result = 0;
+ sdio_writeb(func, 1, I2400MS_INTR_ENABLE_ADDR, &result);
+ if (result < 0) {
+ sdio_release_irq(func);
+ dev_err(dev, "Failed to enable interrupts %d\n", result);
+ }
+error_irq_claim:
+ sdio_release_host(func);
+ d_fnend(5, dev, "(i2400ms %p) = %d\n", i2400ms, result);
+ return result;
+}
+
+
+/*
+ * Tear down SDIO RX
+ *
+ * Disables IRQs in the device and removes the IRQ handler.
+ */
+void i2400ms_rx_release(struct i2400ms *i2400ms)
+{
+ int result;
+ struct sdio_func *func = i2400ms->func;
+ struct device *dev = &func->dev;
+
+ d_fnstart(5, dev, "(i2400ms %p)\n", i2400ms);
+ sdio_claim_host(func);
+ sdio_writeb(func, 0, I2400MS_INTR_ENABLE_ADDR, &result);
+ sdio_release_irq(func);
+ sdio_release_host(func);
+ d_fnend(5, dev, "(i2400ms %p) = %d\n", i2400ms, result);
+}
diff --git a/drivers/net/wimax/i2400m/sdio-tx.c b/drivers/net/wimax/i2400m/sdio-tx.c
new file mode 100644
index 00000000000..5105a5ebc44
--- /dev/null
+++ b/drivers/net/wimax/i2400m/sdio-tx.c
@@ -0,0 +1,153 @@
+/*
+ * Intel Wireless WiMAX Connection 2400m
+ * SDIO TX transaction backends
+ *
+ *
+ * Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Intel Corporation
+ * Dirk Brandewie
+ * - Initial implementation
+ *
+ *
+ * Takes the TX messages in the i2400m's driver TX FIFO and sends them
+ * to the device until there are no more.
+ *
+ * If we fail sending the message, we just drop it. There isn't much
+ * we can do at this point. Most of the traffic is network, which has
+ * recovery methods for dropped packets.
+ *
+ * The SDIO functions may sleep, so we can't run them from the context
+ * where i2400m->bus_tx_kick() [i2400ms_bus_tx_kick()] is being called
+ * (which is sometimes atomic). Thus, the actual TX work is deferred
+ * to a workqueue.
+ *
+ * ROADMAP
+ *
+ * i2400ms_bus_tx_kick()
+ * i2400ms_tx_submit() [through workqueue]
+ *
+ * i2400ms_tx_setup()
+ *
+ * i2400ms_tx_release()
+ */
+#include <linux/mmc/sdio_func.h>
+#include "i2400m-sdio.h"
+
+#define D_SUBMODULE tx
+#include "sdio-debug-levels.h"
+
+
+/*
+ * Pull TX transactions from the TX FIFO and send them to the device
+ * until there are no more.
+ */
+static
+void i2400ms_tx_submit(struct work_struct *ws)
+{
+ int result;
+ struct i2400ms *i2400ms = container_of(ws, struct i2400ms, tx_worker);
+ struct i2400m *i2400m = &i2400ms->i2400m;
+ struct sdio_func *func = i2400ms->func;
+ struct device *dev = &func->dev;
+ struct i2400m_msg_hdr *tx_msg;
+ size_t tx_msg_size;
+
+	d_fnstart(4, dev, "(i2400ms %p, i2400m %p)\n", i2400ms, i2400m);
+
+ while (NULL != (tx_msg = i2400m_tx_msg_get(i2400m, &tx_msg_size))) {
+ d_printf(2, dev, "TX: submitting %zu bytes\n", tx_msg_size);
+ d_dump(5, dev, tx_msg, tx_msg_size);
+
+ sdio_claim_host(func);
+ result = sdio_memcpy_toio(func, 0, tx_msg, tx_msg_size);
+ sdio_release_host(func);
+
+ i2400m_tx_msg_sent(i2400m);
+
+ if (result < 0) {
+ dev_err(dev, "TX: cannot submit TX; tx_msg @%zu %zu B:"
+ " %d\n", (void *) tx_msg - i2400m->tx_buf,
+ tx_msg_size, result);
+ }
+
+ d_printf(2, dev, "TX: %zub submitted\n", tx_msg_size);
+ }
+
+ d_fnend(4, dev, "(i2400ms %p) = void\n", i2400ms);
+}
+
+
+/*
+ * The generic driver notifies us that there is data ready for TX
+ *
+ * Schedule a run of i2400ms_tx_submit() to handle it.
+ */
+void i2400ms_bus_tx_kick(struct i2400m *i2400m)
+{
+ struct i2400ms *i2400ms = container_of(i2400m, struct i2400ms, i2400m);
+ struct device *dev = &i2400ms->func->dev;
+
+ d_fnstart(3, dev, "(i2400m %p) = void\n", i2400m);
+
+	/* Schedule the TX work: TX may block, so it has to run in
+	 * thread context.
+	 */
+ queue_work(i2400ms->tx_workqueue, &i2400ms->tx_worker);
+
+ d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
+}
+
+int i2400ms_tx_setup(struct i2400ms *i2400ms)
+{
+ int result;
+ struct device *dev = &i2400ms->func->dev;
+ struct i2400m *i2400m = &i2400ms->i2400m;
+
+ d_fnstart(5, dev, "(i2400ms %p)\n", i2400ms);
+
+ INIT_WORK(&i2400ms->tx_worker, i2400ms_tx_submit);
+ snprintf(i2400ms->tx_wq_name, sizeof(i2400ms->tx_wq_name),
+ "%s-tx", i2400m->wimax_dev.name);
+ i2400ms->tx_workqueue =
+ create_singlethread_workqueue(i2400ms->tx_wq_name);
+ if (NULL == i2400ms->tx_workqueue) {
+ dev_err(dev, "TX: failed to create workqueue\n");
+ result = -ENOMEM;
+ } else
+ result = 0;
+ d_fnend(5, dev, "(i2400ms %p) = %d\n", i2400ms, result);
+ return result;
+}
+
+void i2400ms_tx_release(struct i2400ms *i2400ms)
+{
+ destroy_workqueue(i2400ms->tx_workqueue);
+}
diff --git a/drivers/net/wimax/i2400m/sdio.c b/drivers/net/wimax/i2400m/sdio.c
new file mode 100644
index 00000000000..1bfa283bbd8
--- /dev/null
+++ b/drivers/net/wimax/i2400m/sdio.c
@@ -0,0 +1,511 @@
+/*
+ * Intel Wireless WiMAX Connection 2400m
+ * Linux driver model glue for the SDIO device, reset & fw upload
+ *
+ *
+ * Copyright (C) 2007-2008 Intel Corporation
+ * Dirk Brandewie
+ * Inaky Perez-Gonzalez
+ * Yanir Lubetkin
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * See i2400m-sdio.h for a general description of this driver.
+ *
+ * This file implements the driver model glue and the hooks for the
+ * generic driver to implement the bus-specific functions (device
+ * communication setup/tear down, firmware upload and resetting).
+ *
+ * ROADMAP
+ *
+ * i2400ms_probe()
+ * alloc_netdev()
+ * i2400ms_netdev_setup()
+ * i2400ms_init()
+ * i2400m_netdev_setup()
+ * i2400ms_enable_function()
+ * i2400m_setup()
+ *
+ * i2400ms_remove()
+ * i2400m_release()
+ * free_netdev(net_dev)
+ *
+ * i2400ms_bus_reset() Called by i2400m->bus_reset
+ * __i2400ms_reset()
+ * __i2400ms_send_barker()
+ *
+ * i2400ms_bus_dev_start() Called by i2400m_dev_start() [who is
+ * i2400ms_tx_setup() called by i2400m_setup()]
+ * i2400ms_rx_setup()
+ *
+ * i2400ms_bus_dev_stop() Called by i2400m_dev_stop() [who is
+ *   i2400ms_rx_release()       called by i2400m_release()]
+ * i2400ms_tx_release()
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/mmc/sdio.h>
+#include <linux/mmc/sdio_func.h>
+#include "i2400m-sdio.h"
+#include <linux/wimax/i2400m.h>
+
+#define D_SUBMODULE main
+#include "sdio-debug-levels.h"
+
+/* IOE WiMAX function timeout in seconds */
+static int ioe_timeout = 2;
+module_param(ioe_timeout, int, 0);
+
+/* Our firmware file name */
+#define I2400MS_FW_FILE_NAME "i2400m-fw-sdio-" I2400M_FW_VERSION ".sbcf"
+
+/*
+ * Enable the SDIO function
+ *
+ * Tries to enable the SDIO function; this might fail if the function
+ * is still not ready (in some hardware, the SDIO WiMAX function is
+ * only enabled when we explicitly ask it to be). Retries until a
+ * timeout is reached.
+ *
+ * The reverse of this is...sdio_disable_func()
+ *
+ * Returns: 0 if the SDIO function was enabled, < 0 errno code on
+ * error (-ENODEV when it was unable to enable the function).
+ */
+static
+int i2400ms_enable_function(struct sdio_func *func)
+{
+ u64 timeout;
+ int err;
+ struct device *dev = &func->dev;
+
+ d_fnstart(3, dev, "(func %p)\n", func);
+ /* Setup timeout (FIXME: This needs to read the CIS table to
+ * get a real timeout) and then wait for the device to signal
+ * it is ready */
+ timeout = get_jiffies_64() + ioe_timeout * HZ;
+ err = -ENODEV;
+ while (err != 0 && time_before64(get_jiffies_64(), timeout)) {
+ sdio_claim_host(func);
+ err = sdio_enable_func(func);
+ if (0 == err) {
+ sdio_release_host(func);
+ d_printf(2, dev, "SDIO function enabled\n");
+ goto function_enabled;
+ }
+ d_printf(2, dev, "SDIO function failed to enable: %d\n", err);
+ sdio_disable_func(func);
+ sdio_release_host(func);
+ msleep(I2400MS_INIT_SLEEP_INTERVAL);
+ }
+ /* If timed out, device is not there yet -- get -ENODEV so
+ * the device driver core will retry later on. */
+ if (err == -ETIME) {
+ dev_err(dev, "Can't enable WiMAX function; "
+ " has the function been enabled?\n");
+ err = -ENODEV;
+ }
+function_enabled:
+ d_fnend(3, dev, "(func %p) = %d\n", func, err);
+ return err;
+}
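The enable path above is a bounded retry loop: try, sleep, try again until a deadline passes. A user-space sketch of the same pattern, with try_enable() standing in for sdio_enable_func() and the 2 second budget mirroring the default ioe_timeout:

#include <stdio.h>
#include <time.h>
#include <unistd.h>
#include <errno.h>

/* Stand-in for sdio_enable_func(); pretend the function never comes up. */
static int try_enable(void)
{
	return -ENODEV;
}

int main(void)
{
	time_t deadline = time(NULL) + 2;       /* mirrors ioe_timeout = 2 s */
	int err = -ENODEV;

	while (err != 0 && time(NULL) < deadline) {
		err = try_enable();
		if (err == 0)
			break;
		usleep(100 * 1000);             /* back off between attempts */
	}
	printf("enable result: %d\n", err);     /* still -ENODEV: let the core retry */
	return 0;
}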
+
+
+/*
+ * Setup driver resources needed to communicate with the device
+ *
+ * The firmware was just uploaded and needs some time to settle, so
+ * give it a break first. I'd prefer to just wait for the device to
+ * send something, but it seems the poking we do to enable SDIO
+ * interferes with it, so just give it a break before starting...
+ */
+static
+int i2400ms_bus_dev_start(struct i2400m *i2400m)
+{
+ int result;
+ struct i2400ms *i2400ms = container_of(i2400m, struct i2400ms, i2400m);
+ struct sdio_func *func = i2400ms->func;
+ struct device *dev = &func->dev;
+
+ d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
+ msleep(200);
+ result = i2400ms_rx_setup(i2400ms);
+ if (result < 0)
+ goto error_rx_setup;
+ result = i2400ms_tx_setup(i2400ms);
+ if (result < 0)
+ goto error_tx_setup;
+ d_fnend(3, dev, "(i2400m %p) = %d\n", i2400m, result);
+ return result;
+
+ i2400ms_tx_release(i2400ms);
+error_tx_setup:
+ i2400ms_rx_release(i2400ms);
+error_rx_setup:
+ d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
+ return result;
+}
+
+
+static
+void i2400ms_bus_dev_stop(struct i2400m *i2400m)
+{
+ struct i2400ms *i2400ms = container_of(i2400m, struct i2400ms, i2400m);
+ struct sdio_func *func = i2400ms->func;
+ struct device *dev = &func->dev;
+
+ d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
+ i2400ms_rx_release(i2400ms);
+ i2400ms_tx_release(i2400ms);
+ d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
+}
+
+
+/*
+ * Sends a barker buffer to the device
+ *
+ * This helper allocates a kmalloc'd buffer, transmits from it and
+ * then frees it. We do this because the SDIO host controller expects
+ * a certain alignment which the stack won't really provide, and some
+ * arch/host-controller combinations cannot use stack/vmalloc/text
+ * areas for DMA transfers.
+ */
+static
+int __i2400ms_send_barker(struct i2400ms *i2400ms,
+ const __le32 *barker, size_t barker_size)
+{
+ int ret;
+ struct sdio_func *func = i2400ms->func;
+ struct device *dev = &func->dev;
+ void *buffer;
+
+ ret = -ENOMEM;
+ buffer = kmalloc(I2400MS_BLK_SIZE, GFP_KERNEL);
+ if (buffer == NULL)
+ goto error_kzalloc;
+
+ memcpy(buffer, barker, barker_size);
+ sdio_claim_host(func);
+ ret = sdio_memcpy_toio(func, 0, buffer, I2400MS_BLK_SIZE);
+ sdio_release_host(func);
+
+ if (ret < 0)
+ d_printf(0, dev, "E: barker error: %d\n", ret);
+
+ kfree(buffer);
+error_kzalloc:
+ return ret;
+}
+
+
+/*
+ * Reset a device at different levels (warm, cold or bus)
+ *
+ * @i2400ms: device descriptor
+ * @reset_type: soft, warm or bus reset (I2400M_RT_WARM/SOFT/BUS)
+ *
+ * FIXME: not tested -- need to confirm expected effects
+ *
+ * Warm and cold resets get an SDIO reset if they fail (unimplemented)
+ *
+ * Warm reset:
+ *
+ * The device will be fully reset internally, but won't be
+ * disconnected from the bus (so no reenumeration will happen).
+ * Firmware upload will be necessary.
+ *
+ * The device will send a reboot barker that will trigger the driver
+ * to reinitialize the state [see i2400m_dev_reset_handle() as called
+ * from the RX path in sdio-rx.c].
+ *
+ * Cold and bus (SDIO) reset:
+ *
+ * The device will be fully reset internally, disconnected from the
+ * bus and a reenumeration will happen. Firmware upload will be
+ * necessary. Thus, we don't do any locking or struct
+ * reinitialization, as we are going to be fully disconnected and
+ * reenumerated.
+ *
+ * Note we need to return -ENODEV if a warm reset was requested and we
+ * had to resort to a bus reset. See i2400m_op_reset(), wimax_reset()
+ * and wimax_dev->op_reset.
+ *
+ * WARNING: no driver state saved/fixed
+ */
+static
+int i2400ms_bus_reset(struct i2400m *i2400m, enum i2400m_reset_type rt)
+{
+ int result;
+ struct i2400ms *i2400ms =
+ container_of(i2400m, struct i2400ms, i2400m);
+ struct device *dev = i2400m_dev(i2400m);
+ static const __le32 i2400m_WARM_BOOT_BARKER[4] = {
+ __constant_cpu_to_le32(I2400M_WARM_RESET_BARKER),
+ __constant_cpu_to_le32(I2400M_WARM_RESET_BARKER),
+ __constant_cpu_to_le32(I2400M_WARM_RESET_BARKER),
+ __constant_cpu_to_le32(I2400M_WARM_RESET_BARKER),
+ };
+ static const __le32 i2400m_COLD_BOOT_BARKER[4] = {
+ __constant_cpu_to_le32(I2400M_COLD_RESET_BARKER),
+ __constant_cpu_to_le32(I2400M_COLD_RESET_BARKER),
+ __constant_cpu_to_le32(I2400M_COLD_RESET_BARKER),
+ __constant_cpu_to_le32(I2400M_COLD_RESET_BARKER),
+ };
+
+ if (rt == I2400M_RT_WARM)
+ result = __i2400ms_send_barker(i2400ms, i2400m_WARM_BOOT_BARKER,
+ sizeof(i2400m_WARM_BOOT_BARKER));
+ else if (rt == I2400M_RT_COLD)
+ result = __i2400ms_send_barker(i2400ms, i2400m_COLD_BOOT_BARKER,
+ sizeof(i2400m_COLD_BOOT_BARKER));
+ else if (rt == I2400M_RT_BUS) {
+do_bus_reset:
+ dev_err(dev, "FIXME: SDIO bus reset not implemented\n");
+ result = rt == I2400M_RT_WARM ? -ENODEV : -ENOSYS;
+ } else
+ BUG();
+ if (result < 0 && rt != I2400M_RT_BUS) {
+ dev_err(dev, "%s reset failed (%d); trying SDIO reset\n",
+ rt == I2400M_RT_WARM ? "warm" : "cold", result);
+ rt = I2400M_RT_BUS;
+ goto do_bus_reset;
+ }
+ return result;
+}
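The barker blocks sent by the reset paths are just four copies of a 32-bit word, stored little-endian and padded to one SDIO block, as __i2400ms_send_barker() expects. A stand-alone sketch of that layout (the 0xdeadbeef value is a placeholder, not a real barker):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t barker = 0xdeadbeef;           /* placeholder, not a real barker */
	unsigned char blk[256] = { 0 };         /* one SDIO block, zero padded */
	int i;

	for (i = 0; i < 4; i++) {               /* four copies of the word... */
		blk[4 * i + 0] = barker & 0xff; /* ...stored little-endian */
		blk[4 * i + 1] = (barker >> 8) & 0xff;
		blk[4 * i + 2] = (barker >> 16) & 0xff;
		blk[4 * i + 3] = (barker >> 24) & 0xff;
	}
	printf("first word on the wire: %02x %02x %02x %02x\n",
	       blk[0], blk[1], blk[2], blk[3]);
	return 0;
}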
+
+
+static
+void i2400ms_netdev_setup(struct net_device *net_dev)
+{
+ struct i2400m *i2400m = net_dev_to_i2400m(net_dev);
+ struct i2400ms *i2400ms = container_of(i2400m, struct i2400ms, i2400m);
+ i2400ms_init(i2400ms);
+ i2400m_netdev_setup(net_dev);
+}
+
+
+/*
+ * Debug levels control; see debug.h
+ */
+struct d_level D_LEVEL[] = {
+ D_SUBMODULE_DEFINE(main),
+ D_SUBMODULE_DEFINE(tx),
+ D_SUBMODULE_DEFINE(rx),
+ D_SUBMODULE_DEFINE(fw),
+};
+size_t D_LEVEL_SIZE = ARRAY_SIZE(D_LEVEL);
+
+
+#define __debugfs_register(prefix, name, parent) \
+do { \
+ result = d_level_register_debugfs(prefix, name, parent); \
+ if (result < 0) \
+ goto error; \
+} while (0)
+
+
+static
+int i2400ms_debugfs_add(struct i2400ms *i2400ms)
+{
+ int result;
+ struct dentry *dentry = i2400ms->i2400m.wimax_dev.debugfs_dentry;
+
+	dentry = debugfs_create_dir("i2400m-sdio", dentry);
+ result = PTR_ERR(dentry);
+ if (IS_ERR(dentry)) {
+ if (result == -ENODEV)
+ result = 0; /* No debugfs support */
+ goto error;
+ }
+ i2400ms->debugfs_dentry = dentry;
+ __debugfs_register("dl_", main, dentry);
+ __debugfs_register("dl_", tx, dentry);
+ __debugfs_register("dl_", rx, dentry);
+ __debugfs_register("dl_", fw, dentry);
+
+ return 0;
+
+error:
+ debugfs_remove_recursive(i2400ms->debugfs_dentry);
+ return result;
+}
+
+
+/*
+ * Probe a i2400m interface and register it
+ *
+ * @func: SDIO function
+ * @id: SDIO device ID
+ * @returns: 0 if ok, < 0 errno code on error.
+ *
+ * Allocates a net device, initializes the bus-specific details and
+ * then calls the bus-generic initialization routine. That will
+ * register the wimax and netdev devices, upload the firmware [using
+ * _bus_bm_*()], call _bus_dev_start() to finalize the setup of the
+ * communication with the device and then will start to talk to it to
+ * finish setting it up.
+ *
+ * Initialization is tricky; some instances of the hw are packed with
+ * others in a way that requires a third driver that enables the WiMAX
+ * function. In those cases, we can't enable the SDIO function and
+ * we'll return with -ENODEV. When the driver that enables the WiMAX
+ * function does its thing, it has to do a bus_rescan_devices() on the
+ * SDIO bus so this driver is called again to enumerate the WiMAX
+ * function.
+ */
+static
+int i2400ms_probe(struct sdio_func *func,
+ const struct sdio_device_id *id)
+{
+ int result;
+ struct net_device *net_dev;
+ struct device *dev = &func->dev;
+ struct i2400m *i2400m;
+ struct i2400ms *i2400ms;
+
+ /* Allocate instance [calls i2400m_netdev_setup() on it]. */
+ result = -ENOMEM;
+ net_dev = alloc_netdev(sizeof(*i2400ms), "wmx%d",
+ i2400ms_netdev_setup);
+ if (net_dev == NULL) {
+ dev_err(dev, "no memory for network device instance\n");
+ goto error_alloc_netdev;
+ }
+ SET_NETDEV_DEV(net_dev, dev);
+ i2400m = net_dev_to_i2400m(net_dev);
+ i2400ms = container_of(i2400m, struct i2400ms, i2400m);
+ i2400m->wimax_dev.net_dev = net_dev;
+ i2400ms->func = func;
+ sdio_set_drvdata(func, i2400ms);
+
+ i2400m->bus_tx_block_size = I2400MS_BLK_SIZE;
+ i2400m->bus_pl_size_max = I2400MS_PL_SIZE_MAX;
+ i2400m->bus_dev_start = i2400ms_bus_dev_start;
+ i2400m->bus_dev_stop = i2400ms_bus_dev_stop;
+ i2400m->bus_tx_kick = i2400ms_bus_tx_kick;
+ i2400m->bus_reset = i2400ms_bus_reset;
+ i2400m->bus_bm_cmd_send = i2400ms_bus_bm_cmd_send;
+ i2400m->bus_bm_wait_for_ack = i2400ms_bus_bm_wait_for_ack;
+ i2400m->bus_fw_name = I2400MS_FW_FILE_NAME;
+ i2400m->bus_bm_mac_addr_impaired = 1;
+
+ result = i2400ms_enable_function(i2400ms->func);
+ if (result < 0) {
+ dev_err(dev, "Cannot enable SDIO function: %d\n", result);
+ goto error_func_enable;
+ }
+
+ sdio_claim_host(func);
+ result = sdio_set_block_size(func, I2400MS_BLK_SIZE);
+ if (result < 0) {
+ dev_err(dev, "Failed to set block size: %d\n", result);
+ goto error_set_blk_size;
+ }
+ sdio_release_host(func);
+
+ result = i2400m_setup(i2400m, I2400M_BRI_NO_REBOOT);
+ if (result < 0) {
+ dev_err(dev, "cannot setup device: %d\n", result);
+ goto error_setup;
+ }
+
+ result = i2400ms_debugfs_add(i2400ms);
+ if (result < 0) {
+ dev_err(dev, "cannot create SDIO debugfs: %d\n",
+ result);
+ goto error_debugfs_add;
+ }
+ return 0;
+
+error_debugfs_add:
+ i2400m_release(i2400m);
+error_setup:
+ sdio_set_drvdata(func, NULL);
+ sdio_claim_host(func);
+error_set_blk_size:
+ sdio_disable_func(func);
+ sdio_release_host(func);
+error_func_enable:
+ free_netdev(net_dev);
+error_alloc_netdev:
+ return result;
+}
+
+
+static
+void i2400ms_remove(struct sdio_func *func)
+{
+ struct device *dev = &func->dev;
+ struct i2400ms *i2400ms = sdio_get_drvdata(func);
+ struct i2400m *i2400m = &i2400ms->i2400m;
+ struct net_device *net_dev = i2400m->wimax_dev.net_dev;
+
+ d_fnstart(3, dev, "SDIO func %p\n", func);
+ debugfs_remove_recursive(i2400ms->debugfs_dentry);
+ i2400m_release(i2400m);
+ sdio_set_drvdata(func, NULL);
+ sdio_claim_host(func);
+ sdio_disable_func(func);
+ sdio_release_host(func);
+ free_netdev(net_dev);
+ d_fnend(3, dev, "SDIO func %p\n", func);
+}
+
+enum {
+ I2400MS_INTEL_VID = 0x89,
+};
+
+static
+const struct sdio_device_id i2400ms_sdio_ids[] = {
+ /* Intel: i2400m WiMAX over SDIO */
+ { SDIO_DEVICE(I2400MS_INTEL_VID, 0x1402) },
+ { }, /* end: all zeroes */
+};
+MODULE_DEVICE_TABLE(sdio, i2400ms_sdio_ids);
+
+
+static
+struct sdio_driver i2400m_sdio_driver = {
+ .name = KBUILD_MODNAME,
+ .probe = i2400ms_probe,
+ .remove = i2400ms_remove,
+ .id_table = i2400ms_sdio_ids,
+};
+
+
+static
+int __init i2400ms_driver_init(void)
+{
+ return sdio_register_driver(&i2400m_sdio_driver);
+}
+module_init(i2400ms_driver_init);
+
+
+static
+void __exit i2400ms_driver_exit(void)
+{
+ flush_scheduled_work(); /* for the stuff we schedule */
+ sdio_unregister_driver(&i2400m_sdio_driver);
+}
+module_exit(i2400ms_driver_exit);
+
+
+MODULE_AUTHOR("Intel Corporation ");
+MODULE_DESCRIPTION("Intel 2400M WiMAX networking for SDIO");
+MODULE_LICENSE("GPL");
+MODULE_FIRMWARE(I2400MS_FW_FILE_NAME);
diff --git a/drivers/net/wimax/i2400m/tx.c b/drivers/net/wimax/i2400m/tx.c
new file mode 100644
index 00000000000..613a88ffd65
--- /dev/null
+++ b/drivers/net/wimax/i2400m/tx.c
@@ -0,0 +1,817 @@
+/*
+ * Intel Wireless WiMAX Connection 2400m
+ * Generic (non-bus specific) TX handling
+ *
+ *
+ * Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Intel Corporation
+ * Yanir Lubetkin
+ * - Initial implementation
+ *
+ * Intel Corporation
+ * Inaky Perez-Gonzalez
+ * - Rewritten to use a single FIFO to lower the memory allocation
+ * pressure and optimize cache hits when copying to the queue, as
+ * well as splitting out bus-specific code.
+ *
+ *
+ * Implements data transmission to the device; this is done through a
+ * software FIFO, as data/control frames can be coalesced (while the
+ * device is reading the previous tx transaction, others accumulate).
+ *
+ * A FIFO is used because in the end it is resource-cheaper than
+ * trying to implement scatter/gather over USB. Besides, most traffic
+ * is going to be download (vs upload).
+ *
+ * The format for sending/receiving data to/from the i2400m is
+ * described in detail in rx.c:PROTOCOL FORMAT. In here we implement
+ * the transmission of that. This is split between a bus-independent
+ * part that just prepares everything and a bus-specific part that
+ * does the actual transmission over the bus to the device (in the
+ * bus-specific driver).
+ *
+ *
+ * The general format of a device-host transaction is MSG-HDR, PLD1,
+ * PLD2...PLDN, PL1, PL2,...PLN, PADDING.
+ *
+ * Because we need to send the payload descriptors and then the
+ * payloads, and because it is kind of expensive to do scatterlists in
+ * USB (one URB per node), it becomes cheaper to append all the data
+ * to a FIFO (copying into a FIFO that is likely in cache is cheap).
+ *
+ * Then the bus-specific code takes the parts of that FIFO that are
+ * written and passes them to the device.
+ *
+ * So the concepts to keep in mind there are:
+ *
+ * We use a FIFO to queue the data in a linear buffer. We first append
+ * a MSG-HDR, space for I2400M_TX_PLD_MAX payload descriptors and then
+ * go appending payloads until we run out of space or of payload
+ * descriptors. Then we append padding to make the whole transaction a
+ * multiple of i2400m->bus_tx_block_size (as defined by the bus layer).
+ *
+ * - A TX message: a combination of a message header, payload
+ * descriptors and payloads.
+ *
+ * Open: it is marked as active (i2400m->tx_msg is valid) and we
+ * can keep adding payloads to it.
+ *
+ * Closed: we are not appending more payloads to this TX message
+ *     (exhausted space in the queue, too many payloads or
+ * whichever). We have appended padding so the whole message
+ * length is aligned to i2400m->bus_tx_block_size (as set by the
+ * bus/transport layer).
+ *
+ * - Most of the time we keep a TX message open to which we append
+ * payloads.
+ *
+ * - If we are going to append and there is no more space (we are at
+ * the end of the FIFO), we close the message, mark the rest of the
+ * FIFO space unusable (skip_tail), create a new message at the
+ * beginning of the FIFO (if there is space) and append the message
+ * there.
+ *
+ *   This is because we need to give linear TX messages to the bus
+ *   engine. So instead of wrapping a message around the end of the
+ *   FIFO, we mark the remaining space up to the tail as skippable
+ *   and continue at the head of the buffer.
+ *
+ * - We overload one of the fields in the message header to use it as
+ * 'size' of the TX message, so we can iterate over them. It also
+ * contains a flag that indicates if we have to skip it or not.
+ * When we send the buffer, we update that to its real on-the-wire
+ * value.
+ *
+ * - The MSG-HDR + PLD1...PLDN block has to be a size multiple of 16.
+ *
+ * It follows that if MSG-HDR says we have N messages, the whole
+ * header + descriptors is 16 + 4*N; for those to be a multiple of
+ * 16, it follows that N can be 4, 8, 12, ... (32, 48, 64, 80...
+ * bytes).
+ *
+ * So if we have only 1 payload, we have to submit a header that in
+ * all truth has space for 4.
+ *
+ * The implication is that we reserve space for 12 (64 bytes); but
+ * if we fill up only (eg) 2, our header becomes 32 bytes only. So
+ * the TX engine has to shift those 32 bytes of msg header and 2
+ * payloads and padding so that right after it the payloads start
+ * and the TX engine has to know about that.
+ *
+ * It is cheaper to move the header up than the whole payloads down.
+ *
+ *   We do this in i2400m_tx_close(). See 'i2400m_msg_hdr->offset' and
+ *   the worked example right after this comment block.
+ *
+ * - Each payload has to be size-padded to 16 bytes; before appending
+ * it, we just do it.
+ *
+ * - The whole message has to be padded to i2400m->bus_tx_block_size;
+ * we do this at close time. Thus, when reserving space for the
+ * payload, we always make sure there is also free space for this
+ * padding that sooner or later will happen.
+ *
+ * When we append a message, we tell the bus-specific code to kick in
+ * TXs. It will TX (in parallel) until the buffer is exhausted -- hence
+ * the locking we do. The TX code will only send one TX message at a
+ * time (which, remember, might contain more than one payload). Of
+ * course, when the bus-specific driver attempts to TX a message that
+ * is still open, it gets closed first.
+ *
+ * Gee, this is messy; well, a picture. In the example below we have a
+ * partially full FIFO, with a closed message ready to be delivered
+ * (with a moved message header to make sure it is size-aligned to
+ * 16), TAIL room that was unusable (and thus is marked with a message
+ * header that says 'skip this') and, at the head of the buffer, an
+ * incomplete message with a couple of payloads.
+ *
+ * N ___________________________________________________
+ * | |
+ * | TAIL room |
+ * | |
+ * | msg_hdr to skip (size |= 0x80000) |
+ * |---------------------------------------------------|-------
+ * | | /|\
+ * | | |
+ * | TX message padding | |
+ * | | |
+ * | | |
+ * |- - - - - - - - - - - - - - - - - - - - - - - - - -| |
+ * | | |
+ * | payload 1 | |
+ * | | N * tx_block_size
+ * | | |
+ * |- - - - - - - - - - - - - - - - - - - - - - - - - -| |
+ * | | |
+ * | payload 1 | |
+ * | | |
+ * | | |
+ * |- - - - - - - - - - - - - - - - - - - - - - - - - -|- -|- - - -
+ * | padding 3 /|\ | | /|\
+ * | padding 2 | | | |
+ * | pld 1 32 bytes (2 * 16) | | |
+ * | pld 0 | | | |
+ * | moved msg_hdr \|/ | \|/ |
+ * |- - - - - - - - - - - - - - - - - - - - - - - - - -|- - - |
+ * | | _PLD_SIZE
+ * | unused | |
+ * | | |
+ * |- - - - - - - - - - - - - - - - - - - - - - - - - -| |
+ * | msg_hdr (size X) [this message is closed] | \|/
+ * |===================================================|========== <=== OUT
+ * | |
+ * | |
+ * | |
+ * | Free rooom |
+ * | |
+ * | |
+ * | |
+ * | |
+ * | |
+ * | |
+ * | |
+ * | |
+ * | |
+ * |===================================================|========== <=== IN
+ * | |
+ * | |
+ * | |
+ * | |
+ * | payload 1 |
+ * | |
+ * | |
+ * |- - - - - - - - - - - - - - - - - - - - - - - - - -|
+ * | |
+ * | payload 0 |
+ * | |
+ * | |
+ * |- - - - - - - - - - - - - - - - - - - - - - - - - -|
+ * | pld 11 /|\ |
+ * | ... | |
+ * | pld 1 64 bytes (2 * 16) |
+ * | pld 0 | |
+ * | msg_hdr (size X) \|/ [message is open] |
+ * 0 ---------------------------------------------------
+ *
+ *
+ * ROADMAP
+ *
+ * i2400m_tx_setup()           Called by i2400m_setup()
+ * i2400m_tx_release() Called by i2400m_release()
+ *
+ * i2400m_tx() Called to send data or control frames
+ * i2400m_tx_fifo_push() Allocates append-space in the FIFO
+ * i2400m_tx_new() Opens a new message in the FIFO
+ * i2400m_tx_fits() Checks if a new payload fits in the message
+ * i2400m_tx_close() Closes an open message in the FIFO
+ * i2400m_tx_skip_tail() Marks unusable FIFO tail space
+ * i2400m->bus_tx_kick()
+ *
+ * Now i2400m->bus_tx_kick() is the bus-specific driver backend
+ * implementation; that would do:
+ *
+ * i2400m->bus_tx_kick()
+ * i2400m_tx_msg_get() Gets first message ready to go
+ * ...sends it...
+ * i2400m_tx_msg_sent() Ack the message is sent; repeat from
+ * _tx_msg_get() until it returns NULL
+ * (FIFO empty).
+ */
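As promised in the comment above, a small worked example of the header relocation math, assuming the 16-byte message header and 4-byte payload descriptors described there (so the reserved header area is 16 + 4*12 = 64 bytes); the macro names are illustrative:

#include <stdio.h>

#define PL_PAD		16			/* descriptor block alignment */
#define PLD_MAX		12			/* payload descriptors reserved */
#define HDR_RESERVED	(16 + 4 * PLD_MAX)	/* 64 bytes set aside */
#define ALIGN_UP(x, a)	(((x) + (a) - 1) / (a) * (a))

int main(void)
{
	unsigned num_pls;

	for (num_pls = 1; num_pls <= PLD_MAX; num_pls++) {
		unsigned hdr = ALIGN_UP(16 + 4 * num_pls, PL_PAD);
		unsigned offset = HDR_RESERVED - hdr;

		printf("%2u payloads: header %2u B, moved up by %2u B\n",
		       num_pls, hdr, offset);
	}
	return 0;	/* e.g. 2 payloads: header 32 B, moved up by 32 B */
}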
+#include <linux/netdevice.h>
+#include "i2400m.h"
+
+
+#define D_SUBMODULE tx
+#include "debug-levels.h"
+
+enum {
+ /**
+ * TX Buffer size
+ *
+ * Doc says maximum transaction is 16KiB. If we had 16KiB en
+ * route and 16KiB being queued, it boils down to needing
+ * 32KiB.
+ */
+ I2400M_TX_BUF_SIZE = 32768,
+ /**
+	 * The message header and payload descriptors have to be
+	 * 16-byte aligned (16 + 4 * N = 16 * M). If we take the
+	 * average sent packet to be MTU-sized (~1400-~1500 bytes), it
+	 * follows that we could fit at most 10-11 payloads in one
+	 * transaction. To meet the alignment requirement, that means
+	 * we need to leave space for 12 payload descriptors (64
+	 * bytes). If at the end there are fewer, we pad up to the
+	 * nearest multiple of 16.
+ */
+ I2400M_TX_PLD_MAX = 12,
+ I2400M_TX_PLD_SIZE = sizeof(struct i2400m_msg_hdr)
+ + I2400M_TX_PLD_MAX * sizeof(struct i2400m_pld),
+ I2400M_TX_SKIP = 0x80000000,
+};
+
+#define TAIL_FULL ((void *)~(unsigned long)NULL)
+
+/*
+ * Allocate @size bytes in the TX fifo, return a pointer to it
+ *
+ * @i2400m: device descriptor
+ * @size: size of the buffer we need to allocate
+ * @padding: ensure that there is at least this many bytes of free
+ * contiguous space in the fifo. This is needed because later on
+ * we might need to add padding.
+ *
+ * Returns:
+ *
+ * Pointer to the allocated space. NULL if there is no
+ * space. TAIL_FULL if there is no space at the tail but there is at
+ * the head (Case B below).
+ *
+ * These are the two basic cases we need to keep an eye for -- it is
+ * much better explained in linux/kernel/kfifo.c, but this code
+ * basically does the same. No rocket science here.
+ *
+ * Case A Case B
+ * N ___________ ___________
+ * | tail room | | data |
+ * | | | |
+ * |<- IN ->| |<- OUT ->|
+ * | | | |
+ * | data | | room |
+ * | | | |
+ * |<- OUT ->| |<- IN ->|
+ * | | | |
+ * | head room | | data |
+ * 0 ----------- -----------
+ *
+ * We allocate only *contiguous* space.
+ *
+ * We can allocate only from 'room'. In Case B, it is simple; in Case
+ * A, we only try from the tail room; if that is not enough, we just
+ * fail and return TAIL_FULL to let the caller figure out if it wants
+ * to skip the tail room and try to allocate from the head.
+ *
+ * Note:
+ *
+ * Assumes i2400m->tx_lock is taken, and we use that as a barrier
+ *
+ * The indexes keep increasing and we reset them to zero when we
+ * pop data off the queue
+ */
+static
+void *i2400m_tx_fifo_push(struct i2400m *i2400m, size_t size, size_t padding)
+{
+ struct device *dev = i2400m_dev(i2400m);
+ size_t room, tail_room, needed_size;
+ void *ptr;
+
+ needed_size = size + padding;
+ room = I2400M_TX_BUF_SIZE - (i2400m->tx_in - i2400m->tx_out);
+ if (room < needed_size) { /* this takes care of Case B */
+ d_printf(2, dev, "fifo push %zu/%zu: no space\n",
+ size, padding);
+ return NULL;
+ }
+ /* Is there space at the tail? */
+ tail_room = I2400M_TX_BUF_SIZE - i2400m->tx_in % I2400M_TX_BUF_SIZE;
+ if (tail_room < needed_size) {
+ if (i2400m->tx_out % I2400M_TX_BUF_SIZE
+ < i2400m->tx_in % I2400M_TX_BUF_SIZE) {
+ d_printf(2, dev, "fifo push %zu/%zu: tail full\n",
+ size, padding);
+ return TAIL_FULL; /* There might be head space */
+ } else {
+ d_printf(2, dev, "fifo push %zu/%zu: no head space\n",
+ size, padding);
+ return NULL; /* There is no space */
+ }
+ }
+ ptr = i2400m->tx_buf + i2400m->tx_in % I2400M_TX_BUF_SIZE;
+ d_printf(2, dev, "fifo push %zu/%zu: at @%zu\n", size, padding,
+ i2400m->tx_in % I2400M_TX_BUF_SIZE);
+ i2400m->tx_in += size;
+ return ptr;
+}
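A stand-alone sketch of the allocation decision made above, with ever-growing in/out counters and a sentinel return code standing in for TAIL_FULL; fifo_push() and the example numbers are illustrative only:

#include <stdio.h>
#include <stddef.h>

#define BUF_SIZE 32768

enum push_result { PUSH_OK, PUSH_NO_SPACE, PUSH_TAIL_FULL };

/* Decide where (if anywhere) 'needed' contiguous bytes can go. */
static enum push_result fifo_push(size_t *in, size_t out, size_t needed,
				  size_t *at)
{
	size_t room = BUF_SIZE - (*in - out);
	size_t tail_room = BUF_SIZE - *in % BUF_SIZE;

	if (room < needed)
		return PUSH_NO_SPACE;		/* buffer is simply full */
	if (tail_room < needed) {
		if (out % BUF_SIZE < *in % BUF_SIZE)
			return PUSH_TAIL_FULL;	/* Case A: caller may skip tail */
		return PUSH_NO_SPACE;		/* no contiguous room anywhere */
	}
	*at = *in % BUF_SIZE;
	*in += needed;
	return PUSH_OK;
}

int main(void)
{
	size_t in = 30000, out = 4096, at = 0;

	printf("result %d\n", fifo_push(&in, out, 4096, &at));
	return 0;		/* prints 2 (PUSH_TAIL_FULL): try from the head */
}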
+
+
+/*
+ * Mark the tail of the FIFO buffer as 'to-skip'
+ *
+ * We should never hit the BUG_ON() because all the sizes we push to
+ * the FIFO are padded to be a multiple of 16 -- the size of *msg
+ * (I2400M_PL_PAD for the payloads, I2400M_TX_PLD_SIZE for the
+ * header).
+ *
+ * Note:
+ *
+ * Assumes i2400m->tx_lock is taken, and we use that as a barrier
+ */
+static
+void i2400m_tx_skip_tail(struct i2400m *i2400m)
+{
+ struct device *dev = i2400m_dev(i2400m);
+ size_t tx_in = i2400m->tx_in % I2400M_TX_BUF_SIZE;
+ size_t tail_room = I2400M_TX_BUF_SIZE - tx_in;
+ struct i2400m_msg_hdr *msg = i2400m->tx_buf + tx_in;
+ BUG_ON(tail_room < sizeof(*msg));
+ msg->size = tail_room | I2400M_TX_SKIP;
+ d_printf(2, dev, "skip tail: skipping %zu bytes @%zu\n",
+ tail_room, tx_in);
+ i2400m->tx_in += tail_room;
+}
+
+
+/*
+ * Check if a skb will fit in the TX queue's current active TX
+ * message (if there are still descriptors left unused).
+ *
+ * Returns:
+ * 0 if the message won't fit, 1 if it will.
+ *
+ * Note:
+ *
+ * Assumes a TX message is active (i2400m->tx_msg).
+ *
+ * Assumes i2400m->tx_lock is taken, and we use that as a barrier
+ */
+static
+unsigned i2400m_tx_fits(struct i2400m *i2400m)
+{
+ struct i2400m_msg_hdr *msg_hdr = i2400m->tx_msg;
+ return le16_to_cpu(msg_hdr->num_pls) < I2400M_TX_PLD_MAX;
+
+}
+
+
+/*
+ * Start a new TX message header in the queue.
+ *
+ * Reserve memory from the base FIFO engine and then just initialize
+ * the message header.
+ *
+ * We allocate the biggest TX message header we might need (one that'd
+ * fit I2400M_TX_PLD_MAX payloads) -- when it is closed, it will be
+ * 'ironed out' and the unneeded parts removed.
+ *
+ * NOTE:
+ *
+ * Assumes that the previous message is CLOSED (i.e., either
+ * there was none or 'i2400m_tx_close()' was called on it).
+ *
+ * Assumes i2400m->tx_lock is taken, and we use that as a barrier
+ */
+static
+void i2400m_tx_new(struct i2400m *i2400m)
+{
+ struct device *dev = i2400m_dev(i2400m);
+ struct i2400m_msg_hdr *tx_msg;
+ BUG_ON(i2400m->tx_msg != NULL);
+try_head:
+ tx_msg = i2400m_tx_fifo_push(i2400m, I2400M_TX_PLD_SIZE, 0);
+ if (tx_msg == NULL)
+ goto out;
+ else if (tx_msg == TAIL_FULL) {
+ i2400m_tx_skip_tail(i2400m);
+ d_printf(2, dev, "new TX message: tail full, trying head\n");
+ goto try_head;
+ }
+ memset(tx_msg, 0, I2400M_TX_PLD_SIZE);
+ tx_msg->size = I2400M_TX_PLD_SIZE;
+out:
+ i2400m->tx_msg = tx_msg;
+ d_printf(2, dev, "new TX message: %p @%zu\n",
+ tx_msg, (void *) tx_msg - i2400m->tx_buf);
+}
+
+
+/*
+ * Finalize the current TX message header
+ *
+ * Sets the message header to be at the proper location depending on
+ * how many descriptors we have (check documentation at the file's
+ * header for more info on that).
+ *
+ * Appends padding bytes to make sure the whole TX message (counting
+ * from the 'relocated' message header) is aligned to
+ * tx_block_size. We assume the _append() code has left enough space
+ * in the FIFO for that. If there are no payloads, just pass, as it
+ * won't be transferred.
+ *
+ * The amount of padding bytes depends on how many payloads are in the
+ * TX message, as the "msg header and payload descriptors" will be
+ * shifted up in the buffer.
+ */
+static
+void i2400m_tx_close(struct i2400m *i2400m)
+{
+ struct device *dev = i2400m_dev(i2400m);
+ struct i2400m_msg_hdr *tx_msg = i2400m->tx_msg;
+ struct i2400m_msg_hdr *tx_msg_moved;
+ size_t aligned_size, padding, hdr_size;
+ void *pad_buf;
+
+ if (tx_msg->size & I2400M_TX_SKIP) /* a skipper? nothing to do */
+ goto out;
+
+ /* Relocate the message header
+ *
+ * Find the current header size, align it to 16 and if we need
+ * to move it so the tail is next to the payloads, move it and
+ * set the offset.
+ *
+ * If it moved, this header is good only for transmission; the
+ * original one (it is kept if we moved) is still used to
+ * figure out where the next TX message starts (and where the
+ * offset to the moved header is).
+ */
+ hdr_size = sizeof(*tx_msg)
+ + le16_to_cpu(tx_msg->num_pls) * sizeof(tx_msg->pld[0]);
+ hdr_size = ALIGN(hdr_size, I2400M_PL_PAD);
+ tx_msg->offset = I2400M_TX_PLD_SIZE - hdr_size;
+ tx_msg_moved = (void *) tx_msg + tx_msg->offset;
+ memmove(tx_msg_moved, tx_msg, hdr_size);
+ tx_msg_moved->size -= tx_msg->offset;
+ /*
+ * Now figure out how much we have to add to the (moved!)
+ * message so the size is a multiple of i2400m->bus_tx_block_size.
+ */
+ aligned_size = ALIGN(tx_msg_moved->size, i2400m->bus_tx_block_size);
+ padding = aligned_size - tx_msg_moved->size;
+ if (padding > 0) {
+ pad_buf = i2400m_tx_fifo_push(i2400m, padding, 0);
+ if (unlikely(WARN_ON(pad_buf == NULL
+ || pad_buf == TAIL_FULL))) {
+ /* This should not happen -- append should verify
+ * there is always space left at least to append
+ * tx_block_size */
+ dev_err(dev,
+ "SW BUG! Possible data leakage from memory the "
+ "device should not read for padding - "
+ "size %lu aligned_size %zu tx_buf %p in "
+ "%zu out %zu\n",
+ (unsigned long) tx_msg_moved->size,
+ aligned_size, i2400m->tx_buf, i2400m->tx_in,
+ i2400m->tx_out);
+ } else
+ memset(pad_buf, 0xad, padding);
+ }
+ tx_msg_moved->padding = cpu_to_le16(padding);
+ tx_msg_moved->size += padding;
+ if (tx_msg != tx_msg_moved)
+ tx_msg->size += padding;
+out:
+ i2400m->tx_msg = NULL;
+}
+
+
+/**
+ * i2400m_tx - send the data in a buffer to the device
+ *
+ * @buf: pointer to the buffer to transmit
+ *
+ * @buf_len: buffer size
+ *
+ * @pl_type: type of the payload we are sending.
+ *
+ * Returns:
+ * 0 if ok, < 0 errno code on error (-ENOSPC, if there is no more
+ * room for the message in the queue).
+ *
+ * Appends the buffer to the TX FIFO and notifies the bus-specific
+ * part of the driver that there is new data ready to transmit.
+ * Once this function returns, the buffer has been copied, so it can
+ * be reused.
+ *
+ * The steps followed to append are explained in detail in the file
+ * header.
+ *
+ * Whenever we write to a message, we increase msg->size, so it
+ * reflects exactly how big the message is. This is needed so that if
+ * we concatenate two messages before they can be sent, the code that
+ * sends the messages can find the boundaries (and it will replace the
+ * size with the real barker before sending).
+ *
+ * Note:
+ *
+ * Cold and warm reset payloads need to be sent as a single
+ * payload, so we handle that.
+ */
+int i2400m_tx(struct i2400m *i2400m, const void *buf, size_t buf_len,
+ enum i2400m_pt pl_type)
+{
+ int result = -ENOSPC;
+ struct device *dev = i2400m_dev(i2400m);
+ unsigned long flags;
+ size_t padded_len;
+ void *ptr;
+ unsigned is_singleton = pl_type == I2400M_PT_RESET_WARM
+ || pl_type == I2400M_PT_RESET_COLD;
+
+ d_fnstart(3, dev, "(i2400m %p skb %p [%zu bytes] pt %u)\n",
+ i2400m, buf, buf_len, pl_type);
+ padded_len = ALIGN(buf_len, I2400M_PL_PAD);
+ d_printf(5, dev, "padded_len %zd buf_len %zd\n", padded_len, buf_len);
+ /* If there is no current TX message, create one; if the
+ * current one is out of payload slots or we have a singleton,
+ * close it and start a new one */
+ spin_lock_irqsave(&i2400m->tx_lock, flags);
+try_new:
+ if (unlikely(i2400m->tx_msg == NULL))
+ i2400m_tx_new(i2400m);
+ else if (unlikely(!i2400m_tx_fits(i2400m)
+ || (is_singleton && i2400m->tx_msg->num_pls != 0))) {
+ d_printf(2, dev, "closing TX message (fits %u singleton "
+ "%u num_pls %u)\n", i2400m_tx_fits(i2400m),
+ is_singleton, i2400m->tx_msg->num_pls);
+ i2400m_tx_close(i2400m);
+ i2400m_tx_new(i2400m);
+ }
+	if (i2400m->tx_msg == NULL)	/* creation failed? FIFO is full */
+		goto error_tx_new;
+	if (i2400m->tx_msg->size + padded_len > I2400M_TX_BUF_SIZE / 2) {
+		d_printf(2, dev, "TX: message too big, going new\n");
+		i2400m_tx_close(i2400m);
+		i2400m_tx_new(i2400m);
+		if (i2400m->tx_msg == NULL)
+			goto error_tx_new;
+	}
+ /* So we have a current message header; now append space for
+ * the message -- if there is not enough, try the head */
+ ptr = i2400m_tx_fifo_push(i2400m, padded_len,
+ i2400m->bus_tx_block_size);
+ if (ptr == TAIL_FULL) { /* Tail is full, try head */
+ d_printf(2, dev, "pl append: tail full\n");
+ i2400m_tx_close(i2400m);
+ i2400m_tx_skip_tail(i2400m);
+ goto try_new;
+ } else if (ptr == NULL) { /* All full */
+ result = -ENOSPC;
+ d_printf(2, dev, "pl append: all full\n");
+ } else { /* Got space, copy it, set padding */
+ struct i2400m_msg_hdr *tx_msg = i2400m->tx_msg;
+ unsigned num_pls = le16_to_cpu(tx_msg->num_pls);
+ memcpy(ptr, buf, buf_len);
+ memset(ptr + buf_len, 0xad, padded_len - buf_len);
+ i2400m_pld_set(&tx_msg->pld[num_pls], buf_len, pl_type);
+		d_printf(3, dev, "pld 0x%08x (type 0x%1x len 0x%04zx)\n",
+ le32_to_cpu(tx_msg->pld[num_pls].val),
+ pl_type, buf_len);
+		tx_msg->num_pls = cpu_to_le16(num_pls + 1);
+ tx_msg->size += padded_len;
+ d_printf(2, dev, "TX: appended %zu b (up to %u b) pl #%u \n",
+ padded_len, tx_msg->size, num_pls+1);
+ d_printf(2, dev,
+ "TX: appended hdr @%zu %zu b pl #%u @%zu %zu/%zu b\n",
+ (void *)tx_msg - i2400m->tx_buf, (size_t)tx_msg->size,
+ num_pls+1, ptr - i2400m->tx_buf, buf_len, padded_len);
+ result = 0;
+ if (is_singleton)
+ i2400m_tx_close(i2400m);
+ }
+error_tx_new:
+ spin_unlock_irqrestore(&i2400m->tx_lock, flags);
+ i2400m->bus_tx_kick(i2400m); /* always kick, might free up space */
+ d_fnend(3, dev, "(i2400m %p skb %p [%zu bytes] pt %u) = %d\n",
+ i2400m, buf, buf_len, pl_type, result);
+ return result;
+}
+EXPORT_SYMBOL_GPL(i2400m_tx);
+
+
+/**
+ * i2400m_tx_msg_get - Get the first TX message in the FIFO to start sending it
+ *
+ * @i2400m: device descriptors
+ * @bus_size: where to place the size of the TX message
+ *
+ * Called by the bus-specific driver to get the first TX message at
+ * the FIFO that is ready for transmission.
+ *
+ * It sets the state in @i2400m to indicate the bus-specific driver is
+ * transferring that message (i2400m->tx_msg_size).
+ *
+ * Once the transfer is completed, call i2400m_tx_msg_sent().
+ *
+ * Notes:
+ *
+ * The size of the TX message to be transmitted might be smaller than
+ * that of the TX message in the FIFO (in case the header was
+ * shorter). Hence, we copy it in @bus_size, for the bus layer to
+ * use. We keep the message's size in i2400m->tx_msg_size so that
+ * when the bus later is done transferring we know how much to
+ * advance the fifo.
+ *
+ * We collect statistics here as all the data is available and we
+ * assume it is going to work [see i2400m_tx_msg_sent()].
+ */
+struct i2400m_msg_hdr *i2400m_tx_msg_get(struct i2400m *i2400m,
+ size_t *bus_size)
+{
+ struct device *dev = i2400m_dev(i2400m);
+ struct i2400m_msg_hdr *tx_msg, *tx_msg_moved;
+ unsigned long flags, pls;
+
+ d_fnstart(3, dev, "(i2400m %p bus_size %p)\n", i2400m, bus_size);
+ spin_lock_irqsave(&i2400m->tx_lock, flags);
+skip:
+ tx_msg_moved = NULL;
+ if (i2400m->tx_in == i2400m->tx_out) { /* Empty FIFO? */
+ i2400m->tx_in = 0;
+ i2400m->tx_out = 0;
+ d_printf(2, dev, "TX: FIFO empty: resetting\n");
+ goto out_unlock;
+ }
+ tx_msg = i2400m->tx_buf + i2400m->tx_out % I2400M_TX_BUF_SIZE;
+ if (tx_msg->size & I2400M_TX_SKIP) { /* skip? */
+ d_printf(2, dev, "TX: skip: msg @%zu (%zu b)\n",
+ i2400m->tx_out % I2400M_TX_BUF_SIZE,
+ (size_t) tx_msg->size & ~I2400M_TX_SKIP);
+ i2400m->tx_out += tx_msg->size & ~I2400M_TX_SKIP;
+ goto skip;
+ }
+
+ if (tx_msg->num_pls == 0) { /* No payloads? */
+ if (tx_msg == i2400m->tx_msg) { /* open, we are done */
+ d_printf(2, dev,
+ "TX: FIFO empty: open msg w/o payloads @%zu\n",
+ (void *) tx_msg - i2400m->tx_buf);
+ tx_msg = NULL;
+ goto out_unlock;
+ } else { /* closed, skip it */
+ d_printf(2, dev,
+ "TX: skip msg w/o payloads @%zu (%zu b)\n",
+ (void *) tx_msg - i2400m->tx_buf,
+ (size_t) tx_msg->size);
+ i2400m->tx_out += tx_msg->size & ~I2400M_TX_SKIP;
+ goto skip;
+ }
+ }
+ if (tx_msg == i2400m->tx_msg) /* open msg? */
+ i2400m_tx_close(i2400m);
+
+ /* Now we have a valid TX message (with payloads) to TX */
+ tx_msg_moved = (void *) tx_msg + tx_msg->offset;
+ i2400m->tx_msg_size = tx_msg->size;
+ *bus_size = tx_msg_moved->size;
+ d_printf(2, dev, "TX: pid %d msg hdr at @%zu offset +@%zu "
+ "size %zu bus_size %zu\n",
+ current->pid, (void *) tx_msg - i2400m->tx_buf,
+ (size_t) tx_msg->offset, (size_t) tx_msg->size,
+ (size_t) tx_msg_moved->size);
+	tx_msg_moved->barker = cpu_to_le32(I2400M_H2D_PREVIEW_BARKER);
+	tx_msg_moved->sequence = cpu_to_le32(i2400m->tx_sequence++);
+
+	pls = le16_to_cpu(tx_msg_moved->num_pls);
+ i2400m->tx_pl_num += pls; /* Update stats */
+ if (pls > i2400m->tx_pl_max)
+ i2400m->tx_pl_max = pls;
+ if (pls < i2400m->tx_pl_min)
+ i2400m->tx_pl_min = pls;
+ i2400m->tx_num++;
+ i2400m->tx_size_acc += *bus_size;
+ if (*bus_size < i2400m->tx_size_min)
+ i2400m->tx_size_min = *bus_size;
+ if (*bus_size > i2400m->tx_size_max)
+ i2400m->tx_size_max = *bus_size;
+out_unlock:
+ spin_unlock_irqrestore(&i2400m->tx_lock, flags);
+ d_fnstart(3, dev, "(i2400m %p bus_size %p [%zu]) = %p\n",
+ i2400m, bus_size, *bus_size, tx_msg_moved);
+ return tx_msg_moved;
+}
+EXPORT_SYMBOL_GPL(i2400m_tx_msg_get);
+
+
+/**
+ * i2400m_tx_msg_sent - indicate the transmission of a TX message
+ *
+ * @i2400m: device descriptor
+ *
+ * Called by the bus-specific driver when a message has been sent;
+ * this pops it from the FIFO and, since space has been freed,
+ * restarts the queue in case it was stopped.
+ *
+ * Should be called even if the message send failed and we are
+ * dropping this TX message.
+ */
+void i2400m_tx_msg_sent(struct i2400m *i2400m)
+{
+ unsigned n;
+ unsigned long flags;
+ struct device *dev = i2400m_dev(i2400m);
+
+ d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
+ spin_lock_irqsave(&i2400m->tx_lock, flags);
+ i2400m->tx_out += i2400m->tx_msg_size;
+ d_printf(2, dev, "TX: sent %zu b\n", (size_t) i2400m->tx_msg_size);
+ i2400m->tx_msg_size = 0;
+ BUG_ON(i2400m->tx_out > i2400m->tx_in);
+ /* level the FIFO markers off */
+ n = i2400m->tx_out / I2400M_TX_BUF_SIZE;
+ i2400m->tx_out %= I2400M_TX_BUF_SIZE;
+ i2400m->tx_in -= n * I2400M_TX_BUF_SIZE;
+ netif_start_queue(i2400m->wimax_dev.net_dev);
+ spin_unlock_irqrestore(&i2400m->tx_lock, flags);
+ d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
+}
+EXPORT_SYMBOL_GPL(i2400m_tx_msg_sent);
+
+
+/**
+ * i2400m_tx_setup - Initialize the TX queue and infrastructure
+ *
+ * Make sure we reset the TX sequence to zero, as when this function
+ * is called, the firmware has just been restarted.
+ */
+int i2400m_tx_setup(struct i2400m *i2400m)
+{
+ int result;
+
+ /* Do this here only once -- it can't be done in
+ * i2400m_hard_start_xmit(), as that would cause race conditions if
+ * the work struct were already scheduled on another CPU */
+ INIT_WORK(&i2400m->wake_tx_ws, i2400m_wake_tx_work);
+
+ i2400m->tx_sequence = 0;
+ i2400m->tx_buf = kmalloc(I2400M_TX_BUF_SIZE, GFP_KERNEL);
+ if (i2400m->tx_buf == NULL)
+ result = -ENOMEM;
+ else
+ result = 0;
+ /* Huh? the bus layer has to define this... */
+ BUG_ON(i2400m->bus_tx_block_size == 0);
+ return result;
+}
+
+
+/**
+ * i2400m_tx_release - Tear down the TX queue and infrastructure
+ */
+void i2400m_tx_release(struct i2400m *i2400m)
+{
+ kfree(i2400m->tx_buf);
+}
diff --git a/drivers/net/wimax/i2400m/usb-debug-levels.h b/drivers/net/wimax/i2400m/usb-debug-levels.h
new file mode 100644
index 00000000000..e4358bd880b
--- /dev/null
+++ b/drivers/net/wimax/i2400m/usb-debug-levels.h
@@ -0,0 +1,42 @@
+/*
+ * Intel Wireless WiMAX Connection 2400m
+ * Debug levels control file for the i2400m-usb module
+ *
+ *
+ * Copyright (C) 2007-2008 Intel Corporation
+ * Inaky Perez-Gonzalez
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+#ifndef __debug_levels__h__
+#define __debug_levels__h__
+
+/* Maximum compile and run time debug level for all submodules */
+#define D_MODULENAME i2400m_usb
+#define D_MASTER CONFIG_WIMAX_I2400M_DEBUG_LEVEL
+
+#include <linux/wimax/debug.h>
+
+/* List of all the enabled modules */
+enum d_module {
+ D_SUBMODULE_DECLARE(usb),
+ D_SUBMODULE_DECLARE(fw),
+ D_SUBMODULE_DECLARE(notif),
+ D_SUBMODULE_DECLARE(rx),
+ D_SUBMODULE_DECLARE(tx),
+};
+
+
+#endif /* #ifndef __debug_levels__h__ */
diff --git a/drivers/net/wimax/i2400m/usb-fw.c b/drivers/net/wimax/i2400m/usb-fw.c
new file mode 100644
index 00000000000..5ad287c228b
--- /dev/null
+++ b/drivers/net/wimax/i2400m/usb-fw.c
@@ -0,0 +1,340 @@
+/*
+ * Intel Wireless WiMAX Connection 2400m
+ * Firmware uploader's USB specifics
+ *
+ *
+ * Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Intel Corporation
+ * Yanir Lubetkin
+ * Inaky Perez-Gonzalez
+ * - Initial implementation
+ *
+ * Inaky Perez-Gonzalez
+ * - bus generic/specific split
+ *
+ * THE PROCEDURE
+ *
+ * See fw.c for the generic description of this procedure.
+ *
+ * This file implements only the USB specifics. It boils down to how
+ * to send a command and wait for an acknowledgement from the
+ * device.
+ *
+ * This code (and process) is single threaded. It assumes it is the
+ * only thread poking around (guaranteed by fw.c).
+ *
+ * COMMAND EXECUTION
+ *
+ * A write URB is posted with the buffer to the bulk output endpoint.
+ *
+ * ACK RECEPTION
+ *
+ * We just post a URB to the notification endpoint and wait for
+ * data. We repeat until we get all the data we expect (as indicated
+ * by the call from the bus generic code).
+ *
+ * The data is not read from the bulk in endpoint for boot mode.
+ *
+ * ROADMAP
+ *
+ * i2400mu_bus_bm_cmd_send
+ * i2400m_bm_cmd_prepare...
+ * i2400mu_tx_bulk_out
+ *
+ * i2400mu_bus_bm_wait_for_ack
+ * i2400m_notif_submit
+ */
+#include <linux/usb.h>
+#include "i2400m-usb.h"
+
+
+#define D_SUBMODULE fw
+#include "usb-debug-levels.h"
+
+
+/*
+ * Synchronous write to the device
+ *
+ * Takes care of updating the EDC counts and thus handles device errors.
+ */
+static
+ssize_t i2400mu_tx_bulk_out(struct i2400mu *i2400mu, void *buf, size_t buf_size)
+{
+ int result;
+ struct device *dev = &i2400mu->usb_iface->dev;
+ int len;
+ struct usb_endpoint_descriptor *epd;
+ int pipe, do_autopm = 1;
+
+ result = usb_autopm_get_interface(i2400mu->usb_iface);
+ if (result < 0) {
+ dev_err(dev, "BM-CMD: can't get autopm: %d\n", result);
+ do_autopm = 0;
+ }
+ epd = usb_get_epd(i2400mu->usb_iface, I2400MU_EP_BULK_OUT);
+ pipe = usb_sndbulkpipe(i2400mu->usb_dev, epd->bEndpointAddress);
+retry:
+ result = usb_bulk_msg(i2400mu->usb_dev, pipe, buf, buf_size, &len, HZ);
+ switch (result) {
+ case 0:
+ if (len != buf_size) {
+ dev_err(dev, "BM-CMD: short write (%u B vs %zu "
+ "expected)\n", len, buf_size);
+ result = -EIO;
+ break;
+ }
+ result = len;
+ break;
+ case -EINVAL: /* while removing driver */
+ case -ENODEV: /* dev disconnect ... */
+ case -ENOENT: /* just ignore it */
+ case -ESHUTDOWN: /* and exit */
+ case -ECONNRESET:
+ result = -ESHUTDOWN;
+ break;
+ case -ETIMEDOUT: /* bah... */
+ break;
+ default: /* any other? */
+ if (edc_inc(&i2400mu->urb_edc,
+ EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
+ dev_err(dev, "BM-CMD: maximum errors in "
+ "URB exceeded; resetting device\n");
+ usb_queue_reset_device(i2400mu->usb_iface);
+ result = -ENODEV;
+ break;
+ }
+ dev_err(dev, "BM-CMD: URB error %d, retrying\n",
+ result);
+ goto retry;
+ }
+ if (do_autopm)
+ usb_autopm_put_interface(i2400mu->usb_iface);
+ return result;
+}
+
+
+/*
+ * Send a boot-mode command over the bulk-out pipe
+ *
+ * Command can be a raw command, which requires no preparation (and
+ * which might not even follow the command format). Checks that
+ * the right amount of data was transferred.
+ *
+ * To satisfy USB requirements (no onstack, vmalloc or in data segment
+ * buffers), we copy the command to i2400m->bm_cmd_buf and send it from
+ * there.
+ *
+ * @flags: pass thru from i2400m_bm_cmd()
+ * @return: cmd_size if ok, < 0 errno code on error.
+ */
+ssize_t i2400mu_bus_bm_cmd_send(struct i2400m *i2400m,
+ const struct i2400m_bootrom_header *_cmd,
+ size_t cmd_size, int flags)
+{
+ ssize_t result;
+ struct device *dev = i2400m_dev(i2400m);
+ struct i2400mu *i2400mu = container_of(i2400m, struct i2400mu, i2400m);
+ int opcode = _cmd == NULL ? -1 : i2400m_brh_get_opcode(_cmd);
+ struct i2400m_bootrom_header *cmd;
+ size_t cmd_size_a = ALIGN(cmd_size, 16); /* USB restriction */
+
+ d_fnstart(8, dev, "(i2400m %p cmd %p size %zu)\n",
+ i2400m, _cmd, cmd_size);
+ result = -E2BIG;
+ if (cmd_size > I2400M_BM_CMD_BUF_SIZE)
+ goto error_too_big;
+ memcpy(i2400m->bm_cmd_buf, _cmd, cmd_size);
+ cmd = i2400m->bm_cmd_buf;
+ if (cmd_size_a > cmd_size) /* Zero pad space */
+ memset(i2400m->bm_cmd_buf + cmd_size, 0, cmd_size_a - cmd_size);
+ if ((flags & I2400M_BM_CMD_RAW) == 0) {
+ if (WARN_ON(i2400m_brh_get_response_required(cmd) == 0))
+ dev_warn(dev, "SW BUG: response_required == 0\n");
+ i2400m_bm_cmd_prepare(cmd);
+ }
+ result = i2400mu_tx_bulk_out(i2400mu, i2400m->bm_cmd_buf, cmd_size);
+ if (result < 0) {
+ dev_err(dev, "boot-mode cmd %d: cannot send: %zd\n",
+ opcode, result);
+ goto error_cmd_send;
+ }
+ if (result != cmd_size) { /* all was transferred? */
+ dev_err(dev, "boot-mode cmd %d: incomplete transfer "
+ "(%zu vs %zu submitted)\n", opcode, result, cmd_size);
+ result = -EIO;
+ goto error_cmd_size;
+ }
+error_cmd_size:
+error_cmd_send:
+error_too_big:
+ d_fnend(8, dev, "(i2400m %p cmd %p size %zu) = %zd\n",
+ i2400m, _cmd, cmd_size, result);
+ return result;
+}
+
+
+static
+void __i2400mu_bm_notif_cb(struct urb *urb)
+{
+ complete(urb->context);
+}
+
+
+/*
+ * submit a read to the notification endpoint
+ *
+ * @i2400m: device descriptor
+ * @urb: urb to use
+ * @completion: completion variable to complete when done
+ *
+ * Data is always read to i2400m->bm_ack_buf
+ */
+static
+int i2400mu_notif_submit(struct i2400mu *i2400mu, struct urb *urb,
+ struct completion *completion)
+{
+ struct i2400m *i2400m = &i2400mu->i2400m;
+ struct usb_endpoint_descriptor *epd;
+ int pipe;
+
+ epd = usb_get_epd(i2400mu->usb_iface, I2400MU_EP_NOTIFICATION);
+ pipe = usb_rcvintpipe(i2400mu->usb_dev, epd->bEndpointAddress);
+ usb_fill_int_urb(urb, i2400mu->usb_dev, pipe,
+ i2400m->bm_ack_buf, I2400M_BM_ACK_BUF_SIZE,
+ __i2400mu_bm_notif_cb, completion,
+ epd->bInterval);
+ return usb_submit_urb(urb, GFP_KERNEL);
+}
+
+
+/*
+ * Read an ack from the notification endpoint
+ *
+ * @i2400m: device descriptor
+ * @_ack: pointer to where to store the read data
+ * @ack_size: how many bytes we should read
+ *
+ * Returns: < 0 errno code on error; otherwise, amount of received bytes.
+ *
+ * Submits a notification read, appends the read data to the given ack
+ * buffer and then repeats (until @ack_size bytes have been
+ * received).
+ */
+ssize_t i2400mu_bus_bm_wait_for_ack(struct i2400m *i2400m,
+ struct i2400m_bootrom_header *_ack,
+ size_t ack_size)
+{
+ ssize_t result = -ENOMEM;
+ struct device *dev = i2400m_dev(i2400m);
+ struct i2400mu *i2400mu = container_of(i2400m, struct i2400mu, i2400m);
+ struct urb notif_urb;
+ void *ack = _ack;
+ size_t offset, len;
+ long val;
+ int do_autopm = 1;
+ DECLARE_COMPLETION_ONSTACK(notif_completion);
+
+ d_fnstart(8, dev, "(i2400m %p ack %p size %zu)\n",
+ i2400m, ack, ack_size);
+ BUG_ON(_ack == i2400m->bm_ack_buf);
+ result = usb_autopm_get_interface(i2400mu->usb_iface);
+ if (result < 0) {
+ dev_err(dev, "BM-ACK: can't get autopm: %d\n", (int) result);
+ do_autopm = 0;
+ }
+ usb_init_urb(&notif_urb); /* ready notifications */
+ usb_get_urb(&notif_urb);
+ offset = 0;
+ while (offset < ack_size) {
+ init_completion(&notif_completion);
+ result = i2400mu_notif_submit(i2400mu, &notif_urb,
+ &notif_completion);
+ if (result < 0)
+ goto error_notif_urb_submit;
+ val = wait_for_completion_interruptible_timeout(
+ &notif_completion, HZ);
+ if (val == 0) {
+ result = -ETIMEDOUT;
+ usb_kill_urb(&notif_urb); /* Timedout */
+ goto error_notif_wait;
+ }
+ if (val == -ERESTARTSYS) {
+ result = -EINTR; /* Interrupted */
+ usb_kill_urb(&notif_urb);
+ goto error_notif_wait;
+ }
+ result = notif_urb.status; /* How was the ack? */
+ switch (result) {
+ case 0:
+ break;
+ case -EINVAL: /* while removing driver */
+ case -ENODEV: /* dev disconnect ... */
+ case -ENOENT: /* just ignore it */
+ case -ESHUTDOWN: /* and exit */
+ case -ECONNRESET:
+ result = -ESHUTDOWN;
+ goto error_dev_gone;
+ default: /* any other? */
+ usb_kill_urb(&notif_urb); /* kill before retrying */
+ if (edc_inc(&i2400mu->urb_edc,
+ EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME))
+ goto error_exceeded;
+ dev_err(dev, "BM-ACK: URB error %d, "
+ "retrying\n", notif_urb.status);
+ continue; /* retry */
+ }
+ if (notif_urb.actual_length == 0) {
+ d_printf(6, dev, "ZLP received, retrying\n");
+ continue;
+ }
+ /* Got data, append it to the buffer */
+ len = min(ack_size - offset, (size_t) notif_urb.actual_length);
+ memcpy(ack + offset, i2400m->bm_ack_buf, len);
+ offset += len;
+ }
+ result = offset;
+error_notif_urb_submit:
+error_notif_wait:
+error_dev_gone:
+out:
+ if (do_autopm)
+ usb_autopm_put_interface(i2400mu->usb_iface);
+ d_fnend(8, dev, "(i2400m %p ack %p size %zu) = %zd\n",
+ i2400m, ack, ack_size, result);
+ return result;
+
+error_exceeded:
+ dev_err(dev, "bm: maximum errors in notification URB exceeded; "
+ "resetting device\n");
+ usb_queue_reset_device(i2400mu->usb_iface);
+ goto out;
+}
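
The two entry points above are meant to be driven back to back by the generic firmware loader (fw.c, which is not part of this hunk) through the i2400m->bus_bm_* hooks. As an illustration only, a sketch of one boot-mode transaction, with all local names chosen for this example:

    /* Sketch only: one boot-mode command/ack round trip as the generic
     * firmware code would drive it through the bus hooks above. */
    static int example_bm_transaction(struct i2400m *i2400m,
                                      const struct i2400m_bootrom_header *cmd,
                                      size_t cmd_size,
                                      struct i2400m_bootrom_header *ack,
                                      size_t ack_size)
    {
            ssize_t result;

            result = i2400mu_bus_bm_cmd_send(i2400m, cmd, cmd_size, 0);
            if (result < 0)         /* the command didn't make it out */
                    return result;
            /* poll the notification endpoint until ack_size bytes arrive */
            result = i2400mu_bus_bm_wait_for_ack(i2400m, ack, ack_size);
            return result < 0 ? result : 0;
    }
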
diff --git a/drivers/net/wimax/i2400m/usb-notif.c b/drivers/net/wimax/i2400m/usb-notif.c
new file mode 100644
index 00000000000..9702c22b249
--- /dev/null
+++ b/drivers/net/wimax/i2400m/usb-notif.c
@@ -0,0 +1,269 @@
+/*
+ * Intel Wireless WiMAX Connection 2400m over USB
+ * Notification handling
+ *
+ *
+ * Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Intel Corporation
+ * Yanir Lubetkin
+ * Inaky Perez-Gonzalez
+ * - Initial implementation
+ *
+ *
+ * The notification endpoint is active when the device is not in boot
+ * mode; in here we just read and get notifications; based on those,
+ * we act to either reinitialize the device after a reboot or to
+ * submit a RX request.
+ *
+ * ROADMAP
+ *
+ * i2400mu_usb_notification_setup()
+ *
+ * i2400mu_usb_notification_release()
+ *
+ * i2400mu_usb_notification_cb() Called when a URB is ready
+ * i2400mu_notification_grok()
+ * i2400m_dev_reset_handle()
+ * i2400mu_rx_kick()
+ */
+#include <linux/usb.h>
+#include "i2400m-usb.h"
+
+
+#define D_SUBMODULE notif
+#include "usb-debug-levels.h"
+
+
+static const
+__le32 i2400m_ZERO_BARKER[4] = { 0, 0, 0, 0 };
+
+
+/*
+ * Process a received notification
+ *
+ * In normal operation mode, we can only receive two types of payloads
+ * on the notification endpoint:
+ *
+ * - a reboot barker: we do a bootstrap (the device has reset).
+ *
+ * - a block of zeroes: there is pending data in the IN endpoint
+ */
+static
+int i2400mu_notification_grok(struct i2400mu *i2400mu, const void *buf,
+ size_t buf_len)
+{
+ int ret;
+ struct device *dev = &i2400mu->usb_iface->dev;
+ struct i2400m *i2400m = &i2400mu->i2400m;
+
+ d_fnstart(4, dev, "(i2400m %p buf %p buf_len %zu)\n",
+ i2400mu, buf, buf_len);
+ ret = -EIO;
+ if (buf_len < sizeof(i2400m_NBOOT_BARKER))
+ /* Not a bug, just ignore */
+ goto error_bad_size;
+ if (!memcmp(i2400m_NBOOT_BARKER, buf, sizeof(i2400m_NBOOT_BARKER))
+ || !memcmp(i2400m_SBOOT_BARKER, buf, sizeof(i2400m_SBOOT_BARKER)))
+ ret = i2400m_dev_reset_handle(i2400m);
+ else if (!memcmp(i2400m_ZERO_BARKER, buf, sizeof(i2400m_ZERO_BARKER))) {
+ i2400mu_rx_kick(i2400mu);
+ ret = 0;
+ } else { /* Unknown or unexpected data in the notif message */
+ char prefix[64];
+ ret = -EIO;
+ dev_err(dev, "HW BUG? Unknown/unexpected data in notification "
+ "message (%zu bytes)\n", buf_len);
+ snprintf(prefix, sizeof(prefix), "%s %s: ",
+ dev_driver_string(dev) , dev->bus_id);
+ if (buf_len > 64) {
+ print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET,
+ 8, 4, buf, 64, 0);
+ printk(KERN_ERR "%s... (only first 64 bytes "
+ "dumped)\n", prefix);
+ } else
+ print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET,
+ 8, 4, buf, buf_len, 0);
+ }
+error_bad_size:
+ d_fnend(4, dev, "(i2400m %p buf %p buf_len %zu) = %d\n",
+ i2400mu, buf, buf_len, ret);
+ return ret;
+}
+
+
+/*
+ * URB callback for the notification endpoint
+ *
+ * @urb: the urb received from the notification endpoint
+ *
+ * This function just processes the USB side of the transaction:
+ * it checks that everything is fine, passes the processing to
+ * i2400mu_notification_grok() and resubmits the URB.
+ */
+static
+void i2400mu_notification_cb(struct urb *urb)
+{
+ int ret;
+ struct i2400mu *i2400mu = urb->context;
+ struct device *dev = &i2400mu->usb_iface->dev;
+
+ d_fnstart(4, dev, "(urb %p status %d actual_length %d)\n",
+ urb, urb->status, urb->actual_length);
+ ret = urb->status;
+ switch (ret) {
+ case 0:
+ ret = i2400mu_notification_grok(i2400mu, urb->transfer_buffer,
+ urb->actual_length);
+ if (ret == -EIO && edc_inc(&i2400mu->urb_edc, EDC_MAX_ERRORS,
+ EDC_ERROR_TIMEFRAME))
+ goto error_exceeded;
+ if (ret == -ENOMEM) /* uff...power cycle? shutdown? */
+ goto error_exceeded;
+ break;
+ case -EINVAL: /* while removing driver */
+ case -ENODEV: /* dev disconnect ... */
+ case -ENOENT: /* ditto */
+ case -ESHUTDOWN: /* URB killed */
+ case -ECONNRESET: /* disconnection */
+ goto out; /* Notify around */
+ default: /* Some error? */
+ if (edc_inc(&i2400mu->urb_edc,
+ EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME))
+ goto error_exceeded;
+ dev_err(dev, "notification: URB error %d, retrying\n",
+ urb->status);
+ }
+ usb_mark_last_busy(i2400mu->usb_dev);
+ ret = usb_submit_urb(i2400mu->notif_urb, GFP_ATOMIC);
+ switch (ret) {
+ case 0:
+ case -EINVAL: /* while removing driver */
+ case -ENODEV: /* dev disconnect ... */
+ case -ENOENT: /* ditto */
+ case -ESHUTDOWN: /* URB killed */
+ case -ECONNRESET: /* disconnection */
+ break; /* just ignore */
+ default: /* Some error? */
+ dev_err(dev, "notification: cannot submit URB: %d\n", ret);
+ goto error_submit;
+ }
+ d_fnend(4, dev, "(urb %p status %d actual_length %d) = void\n",
+ urb, urb->status, urb->actual_length);
+ return;
+
+error_exceeded:
+ dev_err(dev, "maximum errors in notification URB exceeded; "
+ "resetting device\n");
+error_submit:
+ usb_queue_reset_device(i2400mu->usb_iface);
+out:
+ d_fnend(4, dev, "(urb %p status %d actual_length %d) = void\n",
+ urb, urb->status, urb->actual_length);
+ return;
+}
+
+
+/*
+ * setup the notification endpoint
+ *
+ * @i2400m: device descriptor
+ *
+ * This procedure prepares the notification urb and handler for receiving
+ * unsolicited barkers from the device.
+ */
+int i2400mu_notification_setup(struct i2400mu *i2400mu)
+{
+ struct device *dev = &i2400mu->usb_iface->dev;
+ int usb_pipe, ret = 0;
+ struct usb_endpoint_descriptor *epd;
+ char *buf;
+
+ d_fnstart(4, dev, "(i2400m %p)\n", i2400mu);
+ buf = kmalloc(I2400MU_MAX_NOTIFICATION_LEN, GFP_KERNEL | GFP_DMA);
+ if (buf == NULL) {
+ dev_err(dev, "notification: buffer allocation failed\n");
+ ret = -ENOMEM;
+ goto error_buf_alloc;
+ }
+
+ i2400mu->notif_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!i2400mu->notif_urb) {
+ ret = -ENOMEM;
+ dev_err(dev, "notification: cannot allocate URB\n");
+ goto error_alloc_urb;
+ }
+ epd = usb_get_epd(i2400mu->usb_iface, I2400MU_EP_NOTIFICATION);
+ usb_pipe = usb_rcvintpipe(i2400mu->usb_dev, epd->bEndpointAddress);
+ usb_fill_int_urb(i2400mu->notif_urb, i2400mu->usb_dev, usb_pipe,
+ buf, I2400MU_MAX_NOTIFICATION_LEN,
+ i2400mu_notification_cb, i2400mu, epd->bInterval);
+ ret = usb_submit_urb(i2400mu->notif_urb, GFP_KERNEL);
+ if (ret != 0) {
+ dev_err(dev, "notification: cannot submit URB: %d\n", ret);
+ goto error_submit;
+ }
+ d_fnend(4, dev, "(i2400m %p) = %d\n", i2400mu, ret);
+ return ret;
+
+error_submit:
+ usb_free_urb(i2400mu->notif_urb);
+error_alloc_urb:
+ kfree(buf);
+error_buf_alloc:
+ d_fnend(4, dev, "(i2400m %p) = %d\n", i2400mu, ret);
+ return ret;
+}
+
+
+/*
+ * Tear down of the notification mechanism
+ *
+ * @i2400m: device descriptor
+ *
+ * Kill the interrupt endpoint urb, free any allocated resources.
+ *
+ * We need to check whether this has already been done because, for
+ * example, _suspend() calls this; if after a suspend() we get a
+ * _disconnect() (as is the case when hibernating), nothing bad happens.
+ */
+void i2400mu_notification_release(struct i2400mu *i2400mu)
+{
+ struct device *dev = &i2400mu->usb_iface->dev;
+
+ d_fnstart(4, dev, "(i2400mu %p)\n", i2400mu);
+ if (i2400mu->notif_urb != NULL) {
+ usb_kill_urb(i2400mu->notif_urb);
+ kfree(i2400mu->notif_urb->transfer_buffer);
+ usb_free_urb(i2400mu->notif_urb);
+ i2400mu->notif_urb = NULL;
+ }
+ d_fnend(4, dev, "(i2400mu %p)\n", i2400mu);
+}
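
edc_inc() together with EDC_MAX_ERRORS and EDC_ERROR_TIMEFRAME shows up in every error path of these files; it comes from i2400m-usb.h (not part of this hunk) and implements an error-density counter: it returns true once too many errors accumulate inside a time window, at which point the caller gives up and queues a device reset. A simplified, self-contained sketch of the idea, with names and fields that are illustrative rather than the header's own:

    #include <linux/jiffies.h>

    struct example_edc {
            unsigned long window_start;     /* jiffies when the window opened */
            unsigned int errors;            /* errors recorded in this window */
    };

    /* Returns true when more than @max_errors happened within @timeframe
     * jiffies; callers then stop retrying and reset the device. */
    static inline int example_edc_inc(struct example_edc *edc,
                                      unsigned int max_errors,
                                      unsigned long timeframe)
    {
            unsigned long now = jiffies;

            if (time_after(now, edc->window_start + timeframe)) {
                    edc->window_start = now;        /* window expired, restart */
                    edc->errors = 0;
            }
            edc->errors++;
            return edc->errors > max_errors;
    }
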
diff --git a/drivers/net/wimax/i2400m/usb-rx.c b/drivers/net/wimax/i2400m/usb-rx.c
new file mode 100644
index 00000000000..074cc1f8985
--- /dev/null
+++ b/drivers/net/wimax/i2400m/usb-rx.c
@@ -0,0 +1,417 @@
+/*
+ * Intel Wireless WiMAX Connection 2400m
+ * USB RX handling
+ *
+ *
+ * Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Intel Corporation
+ * Yanir Lubetkin
+ * - Initial implementation
+ * Inaky Perez-Gonzalez
+ * - Use skb_clone(), break up processing in chunks
+ * - Split transport/device specific
+ * - Make buffer size dynamic to exert less memory pressure
+ *
+ *
+ * This handles the RX path on USB.
+ *
+ * When a notification is received that says 'there is RX data ready',
+ * we call i2400mu_rx_kick(); that wakes up the RX kthread, which
+ * reads a buffer from USB and passes it to i2400m_rx() in the generic
+ * handling code. The RX buffer has a specific format that is
+ * described in rx.c.
+ *
+ * We use a kernel thread in a loop because:
+ *
+ * - we want to be able to call the USB power management get/put
+ * functions (blocking) before each transaction.
+ *
+ * - We might get a lot of notifications and we don't want to submit
+ * a zillion reads; by serializing, we are throttling.
+ *
+ * - RX data processing can get heavy enough so that it is not
+ * appropriate for doing it in the USB callback; thus we run it in a
+ * process context.
+ *
+ * We provide a read buffer of an arbitrary size (short of a page); if
+ * the callback reports -EOVERFLOW, it means it was too small, so we
+ * just double the size and retry (being careful to append, as
+ * sometimes the device provided some data). Every now and then we
+ * check whether the average received size has dropped below half the
+ * current buffer size and, if so, we halve the buffer. At the end,
+ * the size of the preallocated buffer should be following the average
+ * received transaction size, adapting dynamically to it.
+ *
+ * ROADMAP
+ *
+ * i2400mu_rx_kick() Called from notif.c when we get a
+ * 'data ready' notification
+ * i2400mu_rxd() Kernel RX daemon
+ * i2400mu_rx() Receive USB data
+ * i2400m_rx() Send data to generic i2400m RX handling
+ *
+ * i2400mu_rx_setup() called from i2400mu_bus_dev_start()
+ *
+ * i2400mu_rx_release() called from i2400mu_bus_dev_stop()
+ */
+#include <linux/workqueue.h>
+#include <linux/usb.h>
+#include "i2400m-usb.h"
+
+
+#define D_SUBMODULE rx
+#include "usb-debug-levels.h"
+
+/*
+ * Dynamic RX size
+ *
+ * We can't let the rx_size be a multiple of 512 bytes (the RX
+ * endpoint's max packet size). On some USB host controllers (we
+ * haven't been able to fully characterize which), if the device is
+ * about to send (for example) X bytes and we only post a buffer to
+ * receive n*512, it will fail to mark that as babble (so that
+ * i2400mu_rx() [case -EOVERFLOW] can resize the buffer and get the
+ * rest).
+ *
+ * So on growing or shrinking, if the size is a multiple of the
+ * maxpacketsize, we remove a few bytes (rather than adding some), so
+ * with a buddy allocator we try to waste less space.
+ *
+ * Note we also need a hook for this on i2400mu_rx() -- when we do the
+ * first read, we are sure we won't hit this spot because
+ * i2400mu->rx_size has been set properly. However, if we have to
+ * double because of -EOVERFLOW, when we launch the read to get the
+ * rest of the data, we *have* to make sure that also is not a
+ * multiple of the max_pkt_size.
+ */
+
+static
+size_t i2400mu_rx_size_grow(struct i2400mu *i2400mu)
+{
+ struct device *dev = &i2400mu->usb_iface->dev;
+ size_t rx_size;
+ const size_t max_pkt_size = 512;
+
+ rx_size = 2 * i2400mu->rx_size;
+ if (rx_size % max_pkt_size == 0) {
+ rx_size -= 8;
+ d_printf(1, dev,
+ "RX: expected size grew to %zu [adjusted -8] "
+ "from %zu\n",
+ rx_size, i2400mu->rx_size);
+ } else
+ d_printf(1, dev,
+ "RX: expected size grew to %zu from %zu\n",
+ rx_size, i2400mu->rx_size);
+ return rx_size;
+}
+
+
+static
+void i2400mu_rx_size_maybe_shrink(struct i2400mu *i2400mu)
+{
+ const size_t max_pkt_size = 512;
+ struct device *dev = &i2400mu->usb_iface->dev;
+
+ if (unlikely(i2400mu->rx_size_cnt >= 100
+ && i2400mu->rx_size_auto_shrink)) {
+ size_t avg_rx_size =
+ i2400mu->rx_size_acc / i2400mu->rx_size_cnt;
+ size_t new_rx_size = i2400mu->rx_size / 2;
+ if (avg_rx_size < new_rx_size) {
+ if (new_rx_size % max_pkt_size == 0) {
+ new_rx_size -= 8;
+ d_printf(1, dev,
+ "RX: expected size shrank to %zu "
+ "[adjusted -8] from %zu\n",
+ new_rx_size, i2400mu->rx_size);
+ } else
+ d_printf(1, dev,
+ "RX: expected size shrank to %zu "
+ "from %zu\n",
+ new_rx_size, i2400mu->rx_size);
+ i2400mu->rx_size = new_rx_size;
+ i2400mu->rx_size_cnt = 0;
+ i2400mu->rx_size_acc = i2400mu->rx_size;
+ }
+ }
+}
+
+/*
+ * Receive a message with payloads from the USB bus into an skb
+ *
+ * @i2400mu: USB device descriptor
+ * @rx_skb: skb where to place the received message
+ *
+ * Deals with all the USB-specifics of receiving, dynamically
+ * increasing the buffer size if so needed. Returns the payload in the
+ * skb, ready to process. On a zero-length packet, we retry.
+ *
+ * On soft USB errors, we retry (until they become too frequent and
+ * then are promoted to hard); on hard USB errors, we reset the
+ * device. On other errors (e.g. skb reallocation failure), we just
+ * drop the message and hope the next invocation solves it.
+ *
+ * Returns: pointer to the skb if ok, ERR_PTR on error.
+ * NOTE: this function might realloc the skb (if it is too small),
+ * so always update with the one returned.
+ * ERR_PTR() is < 0 on error.
+ */
+static
+struct sk_buff *i2400mu_rx(struct i2400mu *i2400mu, struct sk_buff *rx_skb)
+{
+ int result = 0;
+ struct device *dev = &i2400mu->usb_iface->dev;
+ int usb_pipe, read_size, rx_size, do_autopm;
+ struct usb_endpoint_descriptor *epd;
+ const size_t max_pkt_size = 512;
+
+ d_fnstart(4, dev, "(i2400mu %p)\n", i2400mu);
+ do_autopm = atomic_read(&i2400mu->do_autopm);
+ result = do_autopm ?
+ usb_autopm_get_interface(i2400mu->usb_iface) : 0;
+ if (result < 0) {
+ dev_err(dev, "RX: can't get autopm: %d\n", result);
+ do_autopm = 0;
+ }
+ epd = usb_get_epd(i2400mu->usb_iface, I2400MU_EP_BULK_IN);
+ usb_pipe = usb_rcvbulkpipe(i2400mu->usb_dev, epd->bEndpointAddress);
+retry:
+ rx_size = skb_end_pointer(rx_skb) - rx_skb->data - rx_skb->len;
+ if (unlikely(rx_size % max_pkt_size == 0)) {
+ rx_size -= 8;
+ d_printf(1, dev, "RX: rx_size adapted to %d [-8]\n", rx_size);
+ }
+ result = usb_bulk_msg(
+ i2400mu->usb_dev, usb_pipe, rx_skb->data + rx_skb->len,
+ rx_size, &read_size, HZ);
+ usb_mark_last_busy(i2400mu->usb_dev);
+ switch (result) {
+ case 0:
+ if (read_size == 0)
+ goto retry; /* ZLP, just resubmit */
+ skb_put(rx_skb, read_size);
+ break;
+ case -EINVAL: /* while removing driver */
+ case -ENODEV: /* dev disconnect ... */
+ case -ENOENT: /* just ignore it */
+ case -ESHUTDOWN:
+ case -ECONNRESET:
+ break;
+ case -EOVERFLOW: { /* too small, reallocate */
+ struct sk_buff *new_skb;
+ rx_size = i2400mu_rx_size_grow(i2400mu);
+ if (rx_size <= (1 << 16)) /* cap it */
+ i2400mu->rx_size = rx_size;
+ else if (printk_ratelimit()) {
+ dev_err(dev, "BUG? rx_size up to %d\n", rx_size);
+ result = -EINVAL;
+ goto out;
+ }
+ skb_put(rx_skb, read_size);
+ new_skb = skb_copy_expand(rx_skb, 0, rx_size - rx_skb->len,
+ GFP_KERNEL);
+ if (new_skb == NULL) {
+ if (printk_ratelimit())
+ dev_err(dev, "RX: Can't reallocate skb to %d; "
+ "RX dropped\n", rx_size);
+ kfree_skb(rx_skb);
+ result = 0;
+ goto out; /* drop it...*/
+ }
+ kfree_skb(rx_skb);
+ rx_skb = new_skb;
+ i2400mu->rx_size_cnt = 0;
+ i2400mu->rx_size_acc = i2400mu->rx_size;
+ d_printf(1, dev, "RX: size changed to %d, received %d, "
+ "copied %d, capacity %ld\n",
+ rx_size, read_size, rx_skb->len,
+ (long) (skb_end_pointer(new_skb) - new_skb->head));
+ goto retry;
+ }
+ /* In most cases, it happens due to the hardware scheduling a
+ * read when there was no data - unfortunately, we have no way
+ * to tell this timeout from a USB timeout. So we just ignore
+ * it. */
+ case -ETIMEDOUT:
+ dev_err(dev, "RX: timeout: %d\n", result);
+ result = 0;
+ break;
+ default: /* Any error */
+ if (edc_inc(&i2400mu->urb_edc,
+ EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME))
+ goto error_reset;
+ dev_err(dev, "RX: error receiving URB: %d, retrying\n", result);
+ goto retry;
+ }
+out:
+ if (do_autopm)
+ usb_autopm_put_interface(i2400mu->usb_iface);
+ d_fnend(4, dev, "(i2400mu %p) = %p\n", i2400mu, rx_skb);
+ return rx_skb;
+
+error_reset:
+ dev_err(dev, "RX: maximum errors in URB exceeded; "
+ "resetting device\n");
+ usb_queue_reset_device(i2400mu->usb_iface);
+ rx_skb = ERR_PTR(result);
+ goto out;
+}
+
+
+/*
+ * Kernel thread for USB reception of data
+ *
+ * This thread waits for a kick; once kicked, it will allocate an skb
+ * and receive a single message to it from USB (using
+ * i2400mu_rx()). Once received, it is passed to the generic i2400m RX
+ * code for processing.
+ *
+ * When done processing, it runs some dirty statistics to verify if
+ * the last 100 messages received were smaller than half of the
+ * current RX buffer size. In that case, the RX buffer size is
+ * halved. This helps lower the pressure on the memory
+ * allocator.
+ *
+ * Hard errors force the thread to exit.
+ */
+static
+int i2400mu_rxd(void *_i2400mu)
+{
+ int result = 0;
+ struct i2400mu *i2400mu = _i2400mu;
+ struct i2400m *i2400m = &i2400mu->i2400m;
+ struct device *dev = &i2400mu->usb_iface->dev;
+ struct net_device *net_dev = i2400m->wimax_dev.net_dev;
+ size_t pending;
+ int rx_size;
+ struct sk_buff *rx_skb;
+
+ d_fnstart(4, dev, "(i2400mu %p)\n", i2400mu);
+ while (1) {
+ d_printf(2, dev, "TX: waiting for messages\n");
+ pending = 0;
+ wait_event_interruptible(
+ i2400mu->rx_wq,
+ (kthread_should_stop() /* check this first! */
+ || (pending = atomic_read(&i2400mu->rx_pending_count)))
+ );
+ if (kthread_should_stop())
+ break;
+ if (pending == 0)
+ continue;
+ rx_size = i2400mu->rx_size;
+ d_printf(2, dev, "RX: reading up to %d bytes\n", rx_size);
+ rx_skb = __netdev_alloc_skb(net_dev, rx_size, GFP_KERNEL);
+ if (rx_skb == NULL) {
+ dev_err(dev, "RX: can't allocate skb [%d bytes]\n",
+ rx_size);
+ msleep(50); /* give it some time? */
+ continue;
+ }
+
+ /* Receive the message with the payloads */
+ rx_skb = i2400mu_rx(i2400mu, rx_skb);
+ result = PTR_ERR(rx_skb);
+ if (IS_ERR(rx_skb))
+ goto out;
+ atomic_dec(&i2400mu->rx_pending_count);
+ if (rx_skb->len == 0) { /* some ignorable condition */
+ kfree_skb(rx_skb);
+ continue;
+ }
+
+ /* Deliver the message to the generic i2400m code */
+ i2400mu->rx_size_cnt++;
+ i2400mu->rx_size_acc += rx_skb->len;
+ result = i2400m_rx(i2400m, rx_skb);
+ if (result == -EIO
+ && edc_inc(&i2400mu->urb_edc,
+ EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
+ goto error_reset;
+ }
+
+ /* Maybe adjust RX buffer size */
+ i2400mu_rx_size_maybe_shrink(i2400mu);
+ }
+ result = 0;
+out:
+ d_fnend(4, dev, "(i2400mu %p) = %d\n", i2400mu, result);
+ return result;
+
+error_reset:
+ dev_err(dev, "RX: maximum errors in received buffer exceeded; "
+ "resetting device\n");
+ usb_queue_reset_device(i2400mu->usb_iface);
+ goto out;
+}
+
+
+/*
+ * Start reading from the device
+ *
+ * @i2400m: device instance
+ *
+ * Notify the RX thread that there is data pending.
+ */
+void i2400mu_rx_kick(struct i2400mu *i2400mu)
+{
+ struct i2400m *i2400m = &i2400mu->i2400m;
+ struct device *dev = &i2400mu->usb_iface->dev;
+
+ d_fnstart(3, dev, "(i2400mu %p)\n", i2400m);
+ atomic_inc(&i2400mu->rx_pending_count);
+ wake_up_all(&i2400mu->rx_wq);
+ d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
+}
+
+
+int i2400mu_rx_setup(struct i2400mu *i2400mu)
+{
+ int result = 0;
+ struct i2400m *i2400m = &i2400mu->i2400m;
+ struct device *dev = &i2400mu->usb_iface->dev;
+ struct wimax_dev *wimax_dev = &i2400m->wimax_dev;
+
+ i2400mu->rx_kthread = kthread_run(i2400mu_rxd, i2400mu, "%s-rx",
+ wimax_dev->name);
+ if (IS_ERR(i2400mu->rx_kthread)) {
+ result = PTR_ERR(i2400mu->rx_kthread);
+ dev_err(dev, "RX: cannot start thread: %d\n", result);
+ }
+ return result;
+}
+
+void i2400mu_rx_release(struct i2400mu *i2400mu)
+{
+ kthread_stop(i2400mu->rx_kthread);
+}
+
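
Both i2400mu_rx_size_grow() and i2400mu_rx_size_maybe_shrink() apply the same "never a multiple of the 512-byte max packet size" adjustment explained at the top of the file, and i2400mu_rx() repeats it before every read. As an illustration only, the rule reduced to one self-contained helper (the name is made up for this sketch): doubling a 4096-byte buffer gives 8192, a multiple of 512, so 8184 is used instead and a too-large transfer still shows up as -EOVERFLOW rather than being silently truncated.

    /* Sketch only: keep a receive buffer size off the 512-byte boundary so
     * that an oversized transfer is reported as babble (-EOVERFLOW) instead
     * of being silently truncated at the buffer end. */
    static size_t example_avoid_max_pkt_multiple(size_t rx_size)
    {
            const size_t max_pkt_size = 512;

            if (rx_size % max_pkt_size == 0)
                    rx_size -= 8;
            return rx_size;
    }
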
diff --git a/drivers/net/wimax/i2400m/usb-tx.c b/drivers/net/wimax/i2400m/usb-tx.c
new file mode 100644
index 00000000000..dfd893356f4
--- /dev/null
+++ b/drivers/net/wimax/i2400m/usb-tx.c
@@ -0,0 +1,229 @@
+/*
+ * Intel Wireless WiMAX Connection 2400m
+ * USB specific TX handling
+ *
+ *
+ * Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Intel Corporation
+ * Yanir Lubetkin
+ * - Initial implementation
+ * Inaky Perez-Gonzalez
+ * - Split transport/device specific
+ *
+ *
+ * Takes the TX messages in the i2400m's driver TX FIFO and sends them
+ * to the device until there are no more.
+ *
+ * If we fail sending the message, we just drop it. There isn't much
+ * we can do at this point. We could also retry, but the USB stack has
+ * already retried and still failed, so there is not much of a
+ * point. As well, most of the traffic is network, which has recovery
+ * methods for dropped packets.
+ *
+ * For sending we just obtain a FIFO buffer to send, send it to the
+ * USB bulk out, tell the TX FIFO code we have sent it; query for
+ * another one, etc... until done.
+ *
+ * We use a thread so we can call usb_autopm_get_interface() and
+ * usb_autopm_put_interface() for each transaction; this way when the device
+ * goes idle, it will suspend. It also has less overhead than a
+ * dedicated workqueue, as it is being used for a single task.
+ *
+ * ROADMAP
+ *
+ * i2400mu_tx_setup()
+ * i2400mu_tx_release()
+ *
+ * i2400mu_bus_tx_kick() - Called by the tx.c code when there
+ * is new data in the FIFO.
+ * i2400mu_txd()
+ * i2400m_tx_msg_get()
+ * i2400m_tx_msg_sent()
+ */
+#include "i2400m-usb.h"
+
+
+#define D_SUBMODULE tx
+#include "usb-debug-levels.h"
+
+
+/*
+ * Get the next TX message in the TX FIFO and send it to the device
+ *
+ * Note that any iteration consumes a message to be sent, no matter if
+ * it succeeds or fails (we have no real way to retry or complain).
+ *
+ * Return: 0 if ok, < 0 errno code on hard error.
+ */
+static
+int i2400mu_tx(struct i2400mu *i2400mu, struct i2400m_msg_hdr *tx_msg,
+ size_t tx_msg_size)
+{
+ int result = 0;
+ struct i2400m *i2400m = &i2400mu->i2400m;
+ struct device *dev = &i2400mu->usb_iface->dev;
+ int usb_pipe, sent_size, do_autopm;
+ struct usb_endpoint_descriptor *epd;
+
+ d_fnstart(4, dev, "(i2400mu %p)\n", i2400mu);
+ do_autopm = atomic_read(&i2400mu->do_autopm);
+ result = do_autopm ?
+ usb_autopm_get_interface(i2400mu->usb_iface) : 0;
+ if (result < 0) {
+ dev_err(dev, "TX: can't get autopm: %d\n", result);
+ do_autopm = 0;
+ }
+ epd = usb_get_epd(i2400mu->usb_iface, I2400MU_EP_BULK_OUT);
+ usb_pipe = usb_sndbulkpipe(i2400mu->usb_dev, epd->bEndpointAddress);
+retry:
+ result = usb_bulk_msg(i2400mu->usb_dev, usb_pipe,
+ tx_msg, tx_msg_size, &sent_size, HZ);
+ usb_mark_last_busy(i2400mu->usb_dev);
+ switch (result) {
+ case 0:
+ if (sent_size != tx_msg_size) { /* Too short? drop it */
+ dev_err(dev, "TX: short write (%d B vs %zu "
+ "expected)\n", sent_size, tx_msg_size);
+ result = -EIO;
+ }
+ break;
+ case -EINVAL: /* while removing driver */
+ case -ENODEV: /* dev disconnect ... */
+ case -ENOENT: /* just ignore it */
+ case -ESHUTDOWN: /* and exit */
+ case -ECONNRESET:
+ result = -ESHUTDOWN;
+ break;
+ default: /* Some error? */
+ if (edc_inc(&i2400mu->urb_edc,
+ EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
+ dev_err(dev, "TX: maximum errors in URB "
+ "exceeded; resetting device\n");
+ usb_queue_reset_device(i2400mu->usb_iface);
+ } else {
+ dev_err(dev, "TX: cannot send URB; retrying. "
+ "tx_msg @%zu %zu B [%d sent]: %d\n",
+ (void *) tx_msg - i2400m->tx_buf,
+ tx_msg_size, sent_size, result);
+ goto retry;
+ }
+ }
+ if (do_autopm)
+ usb_autopm_put_interface(i2400mu->usb_iface);
+ d_fnend(4, dev, "(i2400mu %p) = result\n", i2400mu);
+ return result;
+}
+
+
+/*
+ * Get the next TX message in the TX FIFO and send it to the device
+ *
+ * Note we exit the loop if i2400mu_tx() fails; that function only
+ * fails on hard errors (failing to send a buffer is not one of them;
+ * see its documentation).
+ *
+ * Return: 0
+ */
+static
+int i2400mu_txd(void *_i2400mu)
+{
+ int result = 0;
+ struct i2400mu *i2400mu = _i2400mu;
+ struct i2400m *i2400m = &i2400mu->i2400m;
+ struct device *dev = &i2400mu->usb_iface->dev;
+ struct i2400m_msg_hdr *tx_msg;
+ size_t tx_msg_size;
+
+ d_fnstart(4, dev, "(i2400mu %p)\n", i2400mu);
+
+ while (1) {
+ d_printf(2, dev, "TX: waiting for messages\n");
+ tx_msg = NULL;
+ wait_event_interruptible(
+ i2400mu->tx_wq,
+ (kthread_should_stop() /* check this first! */
+ || (tx_msg = i2400m_tx_msg_get(i2400m, &tx_msg_size)))
+ );
+ if (kthread_should_stop())
+ break;
+ WARN_ON(tx_msg == NULL); /* should not happen...*/
+ d_printf(2, dev, "TX: submitting %zu bytes\n", tx_msg_size);
+ d_dump(5, dev, tx_msg, tx_msg_size);
+ /* Soft TX errors are ignored; only hard errors stop the loop */
+ result = i2400mu_tx(i2400mu, tx_msg, tx_msg_size);
+ i2400m_tx_msg_sent(i2400m); /* ack it, advance the FIFO */
+ if (result < 0)
+ break;
+ }
+ d_fnend(4, dev, "(i2400mu %p) = %d\n", i2400mu, result);
+ return result;
+}
+
+
+/*
+ * i2400m TX engine notifies us that there is data in the FIFO ready
+ * for TX
+ *
+ * Just wake up the TX thread; if it is already sending, it will see
+ * there is more data in the FIFO when it loops around. Otherwise it
+ * will pick the message up and send it.
+ */
+void i2400mu_bus_tx_kick(struct i2400m *i2400m)
+{
+ struct i2400mu *i2400mu = container_of(i2400m, struct i2400mu, i2400m);
+ struct device *dev = &i2400mu->usb_iface->dev;
+
+ d_fnstart(3, dev, "(i2400m %p) = void\n", i2400m);
+ wake_up_all(&i2400mu->tx_wq);
+ d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
+}
+
+
+int i2400mu_tx_setup(struct i2400mu *i2400mu)
+{
+ int result = 0;
+ struct i2400m *i2400m = &i2400mu->i2400m;
+ struct device *dev = &i2400mu->usb_iface->dev;
+ struct wimax_dev *wimax_dev = &i2400m->wimax_dev;
+
+ i2400mu->tx_kthread = kthread_run(i2400mu_txd, i2400mu, "%s-tx",
+ wimax_dev->name);
+ if (IS_ERR(i2400mu->tx_kthread)) {
+ result = PTR_ERR(i2400mu->tx_kthread);
+ dev_err(dev, "TX: cannot start thread: %d\n", result);
+ }
+ return result;
+}
+
+void i2400mu_tx_release(struct i2400mu *i2400mu)
+{
+ kthread_stop(i2400mu->tx_kthread);
+}
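
The TX path above and the RX path earlier share the same kick/daemon pairing: the producer only wakes a wait queue, and the kthread re-checks kthread_should_stop() plus its work condition every time it wakes up. A reduced, self-contained sketch of that pattern, with all names invented for this example:

    #include <linux/kthread.h>
    #include <linux/wait.h>
    #include <linux/atomic.h>

    struct example_ctx {
            wait_queue_head_t wq;
            atomic_t pending;
    };

    static void example_kick(struct example_ctx *ctx)
    {
            atomic_inc(&ctx->pending);
            wake_up_all(&ctx->wq);
    }

    static int example_daemon(void *_ctx)
    {
            struct example_ctx *ctx = _ctx;

            while (1) {
                    wait_event_interruptible(ctx->wq,
                                             kthread_should_stop()  /* check first! */
                                             || atomic_read(&ctx->pending));
                    if (kthread_should_stop())
                            break;
                    if (atomic_read(&ctx->pending) == 0)
                            continue;       /* spurious wake-up */
                    atomic_dec(&ctx->pending);
                    /* ... process one unit of work here ... */
            }
            return 0;
    }
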
diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c
new file mode 100644
index 00000000000..6d4b65fd9c1
--- /dev/null
+++ b/drivers/net/wimax/i2400m/usb.c
@@ -0,0 +1,591 @@
+/*
+ * Intel Wireless WiMAX Connection 2400m
+ * Linux driver model glue for USB device, reset & fw upload
+ *
+ *
+ * Copyright (C) 2007-2008 Intel Corporation
+ * Inaky Perez-Gonzalez
+ * Yanir Lubetkin
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * See i2400m-usb.h for a general description of this driver.
+ *
+ * This file implements the driver model glue and the hook-ups the
+ * generic driver uses for the bus-specific functions (device
+ * communication setup/tear down, firmware upload and resetting).
+ *
+ * ROADMAP
+ *
+ * i2400mu_probe()
+ * alloc_netdev()...
+ * i2400mu_netdev_setup()
+ * i2400mu_init()
+ * i2400m_netdev_setup()
+ * i2400m_setup()...
+ *
+ * i2400mu_disconnect
+ * i2400m_release()
+ * free_netdev()
+ *
+ * i2400mu_suspend()
+ * i2400m_cmd_enter_powersave()
+ * i2400mu_notification_release()
+ *
+ * i2400mu_resume()
+ * i2400mu_notification_setup()
+ *
+ * i2400mu_bus_dev_start() Called by i2400m_dev_start() [who is
+ * i2400mu_tx_setup() called by i2400m_setup()]
+ * i2400mu_rx_setup()
+ * i2400mu_notification_setup()
+ *
+ * i2400mu_bus_dev_stop() Called by i2400m_dev_stop() [who is
+ * i2400mu_notification_release() called by i2400m_release()]
+ * i2400mu_rx_release()
+ * i2400mu_tx_release()
+ *
+ * i2400mu_bus_reset() Called by i2400m->bus_reset
+ * __i2400mu_reset()
+ * __i2400mu_send_barker()
+ * usb_reset_device()
+ */
+#include "i2400m-usb.h"
+#include <linux/wimax/i2400m.h>
+#include <linux/debugfs.h>
+
+
+#define D_SUBMODULE usb
+#include "usb-debug-levels.h"
+
+
+/* Our firmware file name */
+#define I2400MU_FW_FILE_NAME "i2400m-fw-usb-" I2400M_FW_VERSION ".sbcf"
+
+static
+int i2400mu_bus_dev_start(struct i2400m *i2400m)
+{
+ int result;
+ struct i2400mu *i2400mu = container_of(i2400m, struct i2400mu, i2400m);
+ struct device *dev = &i2400mu->usb_iface->dev;
+
+ d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
+ result = i2400mu_tx_setup(i2400mu);
+ if (result < 0)
+ goto error_usb_tx_setup;
+ result = i2400mu_rx_setup(i2400mu);
+ if (result < 0)
+ goto error_usb_rx_setup;
+ result = i2400mu_notification_setup(i2400mu);
+ if (result < 0)
+ goto error_notif_setup;
+ d_fnend(3, dev, "(i2400m %p) = %d\n", i2400m, result);
+ return result;
+
+error_notif_setup:
+ i2400mu_rx_release(i2400mu);
+error_usb_rx_setup:
+ i2400mu_tx_release(i2400mu);
+error_usb_tx_setup:
+ d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
+ return result;
+}
+
+
+static
+void i2400mu_bus_dev_stop(struct i2400m *i2400m)
+{
+ struct i2400mu *i2400mu = container_of(i2400m, struct i2400mu, i2400m);
+ struct device *dev = &i2400mu->usb_iface->dev;
+
+ d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
+ i2400mu_notification_release(i2400mu);
+ i2400mu_rx_release(i2400mu);
+ i2400mu_tx_release(i2400mu);
+ d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
+}
+
+
+/*
+ * Sends a barker buffer to the device
+ *
+ * This helper allocates a kmalloc'ed buffer, transmits from it and
+ * then frees it. The reason is that on some architectures the
+ * stack, vmalloc and text areas cannot be used for DMA transfers.
+ *
+ * Error recovery here is simpler: anything is considered a hard error
+ * and will move the reset code to use a last-resort bus-based reset.
+ */
+static
+int __i2400mu_send_barker(struct i2400mu *i2400mu,
+ const __le32 *barker,
+ size_t barker_size,
+ unsigned endpoint)
+{
+ struct usb_endpoint_descriptor *epd = NULL;
+ int pipe, actual_len, ret;
+ struct device *dev = &i2400mu->usb_iface->dev;
+ void *buffer;
+ int do_autopm = 1;
+
+ ret = usb_autopm_get_interface(i2400mu->usb_iface);
+ if (ret < 0) {
+ dev_err(dev, "RESET: can't get autopm: %d\n", ret);
+ do_autopm = 0;
+ }
+ ret = -ENOMEM;
+ buffer = kmalloc(barker_size, GFP_KERNEL);
+ if (buffer == NULL)
+ goto error_kzalloc;
+ epd = usb_get_epd(i2400mu->usb_iface, endpoint);
+ pipe = usb_sndbulkpipe(i2400mu->usb_dev, epd->bEndpointAddress);
+ memcpy(buffer, barker, barker_size);
+ ret = usb_bulk_msg(i2400mu->usb_dev, pipe, buffer, barker_size,
+ &actual_len, HZ);
+ if (ret < 0) {
+ if (ret != -EINVAL)
+ dev_err(dev, "E: barker error: %d\n", ret);
+ } else if (actual_len != barker_size) {
+ dev_err(dev, "E: only %d bytes transmitted\n", actual_len);
+ ret = -EIO;
+ }
+ kfree(buffer);
+error_kzalloc:
+ if (do_autopm)
+ usb_autopm_put_interface(i2400mu->usb_iface);
+ return ret;
+}
+
+
+/*
+ * Reset a device at different levels (warm, cold or bus)
+ *
+ * @i2400m: device descriptor
+ * @reset_type: soft, warm or bus reset (I2400M_RT_WARM/SOFT/BUS)
+ *
+ * Warm and cold resets get a USB reset if they fail.
+ *
+ * Warm reset:
+ *
+ * The device will be fully reset internally, but won't be
+ * disconnected from the USB bus (so no reenumeration will
+ * happen). Firmware upload will be necessary.
+ *
+ * The device will send a reboot barker in the notification endpoint
+ * that will trigger the driver to reinitialize the state
+ * automatically from notif.c:i2400m_notification_grok() into
+ * i2400m_dev_bootstrap_delayed().
+ *
+ * Cold and bus (USB) reset:
+ *
+ * The device will be fully reset internally, disconnected from the
+ * USB bus and a reenumeration will happen. Firmware upload will be
+ * necessary. Thus, we don't do any locking or struct
+ * reinitialization, as we are going to be fully disconnected and
+ * reenumerated.
+ *
+ * Note we need to return -ENODEV if a warm reset was requested and we
+ * had to resort to a bus reset. See i2400m_op_reset(), wimax_reset()
+ * and wimax_dev->op_reset.
+ *
+ * WARNING: no driver state saved/fixed
+ */
+static
+int i2400mu_bus_reset(struct i2400m *i2400m, enum i2400m_reset_type rt)
+{
+ int result;
+ struct i2400mu *i2400mu =
+ container_of(i2400m, struct i2400mu, i2400m);
+ struct device *dev = i2400m_dev(i2400m);
+ static const __le32 i2400m_WARM_BOOT_BARKER[4] = {
+ __constant_cpu_to_le32(I2400M_WARM_RESET_BARKER),
+ __constant_cpu_to_le32(I2400M_WARM_RESET_BARKER),
+ __constant_cpu_to_le32(I2400M_WARM_RESET_BARKER),
+ __constant_cpu_to_le32(I2400M_WARM_RESET_BARKER),
+ };
+ static const __le32 i2400m_COLD_BOOT_BARKER[4] = {
+ __constant_cpu_to_le32(I2400M_COLD_RESET_BARKER),
+ __constant_cpu_to_le32(I2400M_COLD_RESET_BARKER),
+ __constant_cpu_to_le32(I2400M_COLD_RESET_BARKER),
+ __constant_cpu_to_le32(I2400M_COLD_RESET_BARKER),
+ };
+
+ d_fnstart(3, dev, "(i2400m %p rt %u)\n", i2400m, rt);
+ if (rt == I2400M_RT_WARM)
+ result = __i2400mu_send_barker(i2400mu, i2400m_WARM_BOOT_BARKER,
+ sizeof(i2400m_WARM_BOOT_BARKER),
+ I2400MU_EP_BULK_OUT);
+ else if (rt == I2400M_RT_COLD)
+ result = __i2400mu_send_barker(i2400mu, i2400m_COLD_BOOT_BARKER,
+ sizeof(i2400m_COLD_BOOT_BARKER),
+ I2400MU_EP_RESET_COLD);
+ else if (rt == I2400M_RT_BUS) {
+do_bus_reset:
+ result = usb_reset_device(i2400mu->usb_dev);
+ switch (result) {
+ case 0:
+ case -EINVAL: /* device is gone */
+ case -ENODEV:
+ case -ENOENT:
+ case -ESHUTDOWN:
+ result = rt == I2400M_RT_WARM ? -ENODEV : 0;
+ break; /* We assume the device is disconnected */
+ default:
+ dev_err(dev, "USB reset failed (%d), giving up!\n",
+ result);
+ }
+ } else
+ BUG();
+ if (result < 0
+ && result != -EINVAL /* device is gone */
+ && rt != I2400M_RT_BUS) {
+ dev_err(dev, "%s reset failed (%d); trying USB reset\n",
+ rt == I2400M_RT_WARM ? "warm" : "cold", result);
+ rt = I2400M_RT_BUS;
+ goto do_bus_reset;
+ }
+ d_fnend(3, dev, "(i2400m %p rt %u) = %d\n", i2400m, rt, result);
+ return result;
+}
+
+
+static
+void i2400mu_netdev_setup(struct net_device *net_dev)
+{
+ struct i2400m *i2400m = net_dev_to_i2400m(net_dev);
+ struct i2400mu *i2400mu = container_of(i2400m, struct i2400mu, i2400m);
+ i2400mu_init(i2400mu);
+ i2400m_netdev_setup(net_dev);
+}
+
+
+/*
+ * Debug levels control; see debug.h
+ */
+struct d_level D_LEVEL[] = {
+ D_SUBMODULE_DEFINE(usb),
+ D_SUBMODULE_DEFINE(fw),
+ D_SUBMODULE_DEFINE(notif),
+ D_SUBMODULE_DEFINE(rx),
+ D_SUBMODULE_DEFINE(tx),
+};
+size_t D_LEVEL_SIZE = ARRAY_SIZE(D_LEVEL);
+
+
+#define __debugfs_register(prefix, name, parent) \
+do { \
+ result = d_level_register_debugfs(prefix, name, parent); \
+ if (result < 0) \
+ goto error; \
+} while (0)
+
+
+static
+int i2400mu_debugfs_add(struct i2400mu *i2400mu)
+{
+ int result;
+ struct device *dev = &i2400mu->usb_iface->dev;
+ struct dentry *dentry = i2400mu->i2400m.wimax_dev.debugfs_dentry;
+ struct dentry *fd;
+
+ dentry = debugfs_create_dir("i2400m-usb", dentry);
+ result = PTR_ERR(dentry);
+ if (IS_ERR(dentry)) {
+ if (result == -ENODEV)
+ result = 0; /* No debugfs support */
+ goto error;
+ }
+ i2400mu->debugfs_dentry = dentry;
+ __debugfs_register("dl_", usb, dentry);
+ __debugfs_register("dl_", fw, dentry);
+ __debugfs_register("dl_", notif, dentry);
+ __debugfs_register("dl_", rx, dentry);
+ __debugfs_register("dl_", tx, dentry);
+
+ /* Don't touch these if you don't know what you are doing */
+ fd = debugfs_create_u8("rx_size_auto_shrink", 0600, dentry,
+ &i2400mu->rx_size_auto_shrink);
+ result = PTR_ERR(fd);
+ if (IS_ERR(fd) && result != -ENODEV) {
+ dev_err(dev, "Can't create debugfs entry "
+ "rx_size_auto_shrink: %d\n", result);
+ goto error;
+ }
+
+ fd = debugfs_create_size_t("rx_size", 0600, dentry,
+ &i2400mu->rx_size);
+ result = PTR_ERR(fd);
+ if (IS_ERR(fd) && result != -ENODEV) {
+ dev_err(dev, "Can't create debugfs entry "
+ "rx_size: %d\n", result);
+ goto error;
+ }
+
+ return 0;
+
+error:
+ debugfs_remove_recursive(i2400mu->debugfs_dentry);
+ return result;
+}
+
+
+/*
+ * Probe a i2400m interface and register it
+ *
+ * @iface: USB interface to link to
+ * @id: USB class/subclass/protocol id
+ * @returns: 0 if ok, < 0 errno code on error.
+ *
+ * Alloc a net device, initialize the bus-specific details and then
+ * calls the bus-generic initialization routine. That will register
+ * the wimax and netdev devices, upload the firmware [using
+ * _bus_bm_*()], call _bus_dev_start() to finalize the setup of the
+ * communication with the device and then will start to talk to it to
+ * finish setting it up.
+ */
+static
+int i2400mu_probe(struct usb_interface *iface,
+ const struct usb_device_id *id)
+{
+ int result;
+ struct net_device *net_dev;
+ struct device *dev = &iface->dev;
+ struct i2400m *i2400m;
+ struct i2400mu *i2400mu;
+ struct usb_device *usb_dev = interface_to_usbdev(iface);
+
+ if (usb_dev->speed != USB_SPEED_HIGH)
+ dev_err(dev, "device not connected as high speed\n");
+
+ /* Allocate instance [calls i2400m_netdev_setup() on it]. */
+ result = -ENOMEM;
+ net_dev = alloc_netdev(sizeof(*i2400mu), "wmx%d",
+ i2400mu_netdev_setup);
+ if (net_dev == NULL) {
+ dev_err(dev, "no memory for network device instance\n");
+ goto error_alloc_netdev;
+ }
+ SET_NETDEV_DEV(net_dev, dev);
+ i2400m = net_dev_to_i2400m(net_dev);
+ i2400mu = container_of(i2400m, struct i2400mu, i2400m);
+ i2400m->wimax_dev.net_dev = net_dev;
+ i2400mu->usb_dev = usb_get_dev(usb_dev);
+ i2400mu->usb_iface = iface;
+ usb_set_intfdata(iface, i2400mu);
+
+ i2400m->bus_tx_block_size = I2400MU_BLK_SIZE;
+ i2400m->bus_pl_size_max = I2400MU_PL_SIZE_MAX;
+ i2400m->bus_dev_start = i2400mu_bus_dev_start;
+ i2400m->bus_dev_stop = i2400mu_bus_dev_stop;
+ i2400m->bus_tx_kick = i2400mu_bus_tx_kick;
+ i2400m->bus_reset = i2400mu_bus_reset;
+ i2400m->bus_bm_cmd_send = i2400mu_bus_bm_cmd_send;
+ i2400m->bus_bm_wait_for_ack = i2400mu_bus_bm_wait_for_ack;
+ i2400m->bus_fw_name = I2400MU_FW_FILE_NAME;
+ i2400m->bus_bm_mac_addr_impaired = 0;
+
+ iface->needs_remote_wakeup = 1; /* autosuspend (15s delay) */
+ device_init_wakeup(dev, 1);
+ usb_autopm_enable(i2400mu->usb_iface);
+ usb_dev->autosuspend_delay = 15 * HZ;
+ usb_dev->autosuspend_disabled = 0;
+
+ result = i2400m_setup(i2400m, I2400M_BRI_MAC_REINIT);
+ if (result < 0) {
+ dev_err(dev, "cannot setup device: %d\n", result);
+ goto error_setup;
+ }
+ result = i2400mu_debugfs_add(i2400mu);
+ if (result < 0) {
+ dev_err(dev, "Can't register i2400mu's debugfs: %d\n", result);
+ goto error_debugfs_add;
+ }
+ return 0;
+
+error_debugfs_add:
+ i2400m_release(i2400m);
+error_setup:
+ usb_set_intfdata(iface, NULL);
+ usb_put_dev(i2400mu->usb_dev);
+ free_netdev(net_dev);
+error_alloc_netdev:
+ return result;
+}
+
+
+/*
+ * Disconnect an i2400m from the system.
+ *
+ * i2400m_stop() has been called before, so all the rx and tx contexts
+ * have been taken down already. Make sure the queue is stopped,
+ * unregister netdev and i2400m, free and kill.
+ */
+static
+void i2400mu_disconnect(struct usb_interface *iface)
+{
+ struct i2400mu *i2400mu = usb_get_intfdata(iface);
+ struct i2400m *i2400m = &i2400mu->i2400m;
+ struct net_device *net_dev = i2400m->wimax_dev.net_dev;
+ struct device *dev = &iface->dev;
+
+ d_fnstart(3, dev, "(iface %p i2400m %p)\n", iface, i2400m);
+
+ debugfs_remove_recursive(i2400mu->debugfs_dentry);
+ i2400m_release(i2400m);
+ usb_set_intfdata(iface, NULL);
+ usb_put_dev(i2400mu->usb_dev);
+ free_netdev(net_dev);
+ d_fnend(3, dev, "(iface %p i2400m %p) = void\n", iface, i2400m);
+}
+
+
+/*
+ * Get the device ready for USB port or system standby and hibernation
+ *
+ * USB port and system standby are handled the same.
+ *
+ * When the system hibernates, the USB device is powered down and then
+ * up, so we don't really have to do much here, as it will be seen as
+ * a reconnect. Still, for simplicity we treat this case the same as
+ * suspend, so that the device has a chance to notify the base
+ * station (if connected).
+ *
+ * So in the end, all three cases require the same handling.
+ *
+ * If at the time of this call the device's firmware is not loaded,
+ * nothing has to be done.
+ *
+ * If the firmware is loaded, we need to:
+ *
+ * - tell the device to go into host interface power save mode, wait
+ * for it to ack
+ *
+ * This is more involved than it looks: we need to execute a
+ * command, but this time we don't want the code in usb-{tx,rx}.c
+ * to call the usb_autopm_get/put_interface() barriers, as that
+ * would deadlock; so we decrement i2400mu->do_autopm, which acts
+ * as a poor man's semaphore. Ugly, but it works.
+ *
+ * As well, the device might refuse to go to sleep for whatever
+ * reason. In that case we just fail. For system suspend/hibernate
+ * we *can't* fail, so we look at usb_dev->auto_pm to see whether
+ * the suspend call comes from the USB stack or from the system and
+ * act accordingly.
+ *
+ * - stop the notification endpoint polling
+ */
+static
+int i2400mu_suspend(struct usb_interface *iface, pm_message_t pm_msg)
+{
+ int result = 0;
+ struct device *dev = &iface->dev;
+ struct i2400mu *i2400mu = usb_get_intfdata(iface);
+ struct usb_device *usb_dev = i2400mu->usb_dev;
+ struct i2400m *i2400m = &i2400mu->i2400m;
+
+ d_fnstart(3, dev, "(iface %p pm_msg %u)\n", iface, pm_msg.event);
+ if (i2400m->updown == 0)
+ goto no_firmware;
+ d_printf(1, dev, "fw up, requesting standby\n");
+ atomic_dec(&i2400mu->do_autopm);
+ result = i2400m_cmd_enter_powersave(i2400m);
+ atomic_inc(&i2400mu->do_autopm);
+ if (result < 0 && usb_dev->auto_pm == 0) {
+ /* System suspend, can't fail */
+ dev_err(dev, "failed to suspend, will reset on resume\n");
+ result = 0;
+ }
+ if (result < 0)
+ goto error_enter_powersave;
+ i2400mu_notification_release(i2400mu);
+ d_printf(1, dev, "fw up, got standby\n");
+error_enter_powersave:
+no_firmware:
+ d_fnend(3, dev, "(iface %p pm_msg %u) = %d\n",
+ iface, pm_msg.event, result);
+ return result;
+}
+
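The do_autopm gate that the comment above describes is honored by the driver's USB I/O paths (usb-tx.c/usb-rx.c, outside this hunk). A minimal illustrative sketch of that pattern follows; the helper name and the endpoint number are hypothetical, not the driver's actual code:

static int i2400mu_example_cmd_send(struct i2400mu *i2400mu,
				    void *buf, int buf_len)
{
	int result, actual_len = 0;
	int do_autopm = atomic_read(&i2400mu->do_autopm);

	/* Skip the autopm barrier while i2400mu_suspend() has dropped
	 * do_autopm, so the powersave command it sends cannot deadlock
	 * against this path. */
	if (do_autopm)
		usb_autopm_get_interface(i2400mu->usb_iface);
	result = usb_bulk_msg(i2400mu->usb_dev,
			      usb_sndbulkpipe(i2400mu->usb_dev, 2),
			      buf, buf_len, &actual_len, HZ);
	if (do_autopm)
		usb_autopm_put_interface(i2400mu->usb_iface);
	return result;
}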
+
+static
+int i2400mu_resume(struct usb_interface *iface)
+{
+ int ret = 0;
+ struct device *dev = &iface->dev;
+ struct i2400mu *i2400mu = usb_get_intfdata(iface);
+ struct i2400m *i2400m = &i2400mu->i2400m;
+
+ d_fnstart(3, dev, "(iface %p)\n", iface);
+ if (i2400m->updown == 0) {
+ d_printf(1, dev, "fw was down, no resume neeed\n");
+ goto out;
+ }
+ d_printf(1, dev, "fw was up, resuming\n");
+ i2400mu_notification_setup(i2400mu);
+ /* USB has flow control, so we don't need to give it time to
+ * come back; otherwise, we'd use something like a get-state
+ * command... */
+out:
+ d_fnend(3, dev, "(iface %p) = %d\n", iface, ret);
+ return ret;
+}
+
+
+static
+struct usb_device_id i2400mu_id_table[] = {
+ { USB_DEVICE(0x8086, 0x0181) },
+ { USB_DEVICE(0x8086, 0x1403) },
+ { USB_DEVICE(0x8086, 0x1405) },
+ { USB_DEVICE(0x8086, 0x0180) },
+ { USB_DEVICE(0x8086, 0x0182) },
+ { USB_DEVICE(0x8086, 0x1406) },
+ { USB_DEVICE(0x8086, 0x1403) },
+ { },
+};
+MODULE_DEVICE_TABLE(usb, i2400mu_id_table);
+
+
+static
+struct usb_driver i2400mu_driver = {
+ .name = KBUILD_MODNAME,
+ .suspend = i2400mu_suspend,
+ .resume = i2400mu_resume,
+ .probe = i2400mu_probe,
+ .disconnect = i2400mu_disconnect,
+ .id_table = i2400mu_id_table,
+ .supports_autosuspend = 1,
+};
+
+static
+int __init i2400mu_driver_init(void)
+{
+ return usb_register(&i2400mu_driver);
+}
+module_init(i2400mu_driver_init);
+
+
+static
+void __exit i2400mu_driver_exit(void)
+{
+ flush_scheduled_work(); /* for the stuff we schedule from sysfs.c */
+ usb_deregister(&i2400mu_driver);
+}
+module_exit(i2400mu_driver_exit);
+
+MODULE_AUTHOR("Intel Corporation ");
+MODULE_DESCRIPTION("Intel 2400M WiMAX networking for USB");
+MODULE_LICENSE("GPL");
+MODULE_FIRMWARE(I2400MU_FW_FILE_NAME);
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index 289d81adfb9..83babb0a1df 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -150,4 +150,6 @@ source "drivers/usb/atm/Kconfig"
source "drivers/usb/gadget/Kconfig"
+source "drivers/usb/otg/Kconfig"
+
endif # USB_SUPPORT
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index d50a99f70ae..00b47ea24f8 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1275,7 +1275,7 @@ static int acm_suspend(struct usb_interface *intf, pm_message_t message)
struct acm *acm = usb_get_intfdata(intf);
int cnt;
- if (acm->dev->auto_pm) {
+ if (message.event & PM_EVENT_AUTO) {
int b;
spin_lock_irq(&acm->read_lock);
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index 5a8ecc045e3..3771d6e6d0c 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -764,7 +764,8 @@ static int wdm_suspend(struct usb_interface *intf, pm_message_t message)
mutex_lock(&desc->plock);
#ifdef CONFIG_PM
- if (interface_to_usbdev(desc->intf)->auto_pm && test_bit(WDM_IN_USE, &desc->flags)) {
+ if ((message.event & PM_EVENT_AUTO) &&
+ test_bit(WDM_IN_USE, &desc->flags)) {
rv = -EBUSY;
} else {
#endif
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
index 43a863c5cc4..0f5c05f6f9d 100644
--- a/drivers/usb/class/usbtmc.c
+++ b/drivers/usb/class/usbtmc.c
@@ -21,6 +21,7 @@
#include
#include
+#include
#include
#include
#include
@@ -482,7 +483,6 @@ static ssize_t usbtmc_write(struct file *filp, const char __user *buf,
int retval;
int actual;
unsigned long int n_bytes;
- int n;
int remaining;
int done;
int this_part;
@@ -526,11 +526,8 @@ static ssize_t usbtmc_write(struct file *filp, const char __user *buf,
goto exit;
}
- n_bytes = 12 + this_part;
- if (this_part % 4)
- n_bytes += 4 - this_part % 4;
- for (n = 12 + this_part; n < n_bytes; n++)
- buffer[n] = 0;
+ n_bytes = roundup(12 + this_part, 4);
+ memset(buffer + 12 + this_part, 0, n_bytes - (12 + this_part));
retval = usb_bulk_msg(data->usb_dev,
usb_sndbulkpipe(data->usb_dev,
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index aa79280df15..26fece124e0 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -981,9 +981,6 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
return -EINVAL;
if (!uurb->buffer)
return -EINVAL;
- if (uurb->signr != 0 && (uurb->signr < SIGRTMIN ||
- uurb->signr > SIGRTMAX))
- return -EINVAL;
if (!(uurb->type == USBDEVFS_URB_TYPE_CONTROL &&
(uurb->endpoint & ~USB_ENDPOINT_DIR_MASK) == 0)) {
ifnum = findintfep(ps->dev, uurb->endpoint);
@@ -1320,7 +1317,7 @@ static int get_urb32(struct usbdevfs_urb *kurb,
if (__get_user(uptr, &uurb->buffer))
return -EFAULT;
kurb->buffer = compat_ptr(uptr);
- if (__get_user(uptr, &uurb->buffer))
+ if (__get_user(uptr, &uurb->usercontext))
return -EFAULT;
kurb->usercontext = compat_ptr(uptr);
@@ -1401,8 +1398,6 @@ static int proc_disconnectsignal(struct dev_state *ps, void __user *arg)
if (copy_from_user(&ds, arg, sizeof(ds)))
return -EFAULT;
- if (ds.signr != 0 && (ds.signr < SIGRTMIN || ds.signr > SIGRTMAX))
- return -EINVAL;
ps->discsignr = ds.signr;
ps->disccontext = ds.context;
return 0;
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index 8c081308b0e..98760553bc9 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -184,6 +184,20 @@ static int usb_unbind_device(struct device *dev)
return 0;
}
+/*
+ * Cancel any pending scheduled resets
+ *
+ * [see usb_queue_reset_device()]
+ *
+ * Called after unconfiguring / when releasing interfaces. See
+ * comments in __usb_queue_reset_device() regarding
+ * iface->reset_running.
+ */
+static void usb_cancel_queued_reset(struct usb_interface *iface)
+{
+ if (iface->reset_running == 0)
+ cancel_work_sync(&iface->reset_ws);
+}
/* called from driver core with dev locked */
static int usb_probe_interface(struct device *dev)
@@ -242,6 +256,7 @@ static int usb_probe_interface(struct device *dev)
mark_quiesced(intf);
intf->needs_remote_wakeup = 0;
intf->condition = USB_INTERFACE_UNBOUND;
+ usb_cancel_queued_reset(intf);
} else
intf->condition = USB_INTERFACE_BOUND;
@@ -272,6 +287,7 @@ static int usb_unbind_interface(struct device *dev)
usb_disable_interface(udev, intf);
driver->disconnect(intf);
+ usb_cancel_queued_reset(intf);
/* Reset other interface state.
* We cannot do a Set-Interface if the device is suspended or
@@ -279,9 +295,12 @@ static int usb_unbind_interface(struct device *dev)
* altsetting means creating new endpoint device entries).
* When either of these happens, defer the Set-Interface.
*/
- if (intf->cur_altsetting->desc.bAlternateSetting == 0)
- ; /* Already in altsetting 0 so skip Set-Interface */
- else if (!error && intf->dev.power.status == DPM_ON)
+ if (intf->cur_altsetting->desc.bAlternateSetting == 0) {
+ /* Already in altsetting 0 so skip Set-Interface.
+ * Just re-enable it without affecting the endpoint toggles.
+ */
+ usb_enable_interface(udev, intf, false);
+ } else if (!error && intf->dev.power.status == DPM_ON)
usb_set_interface(udev, intf->altsetting[0].
desc.bInterfaceNumber, 0);
else
@@ -380,8 +399,10 @@ void usb_driver_release_interface(struct usb_driver *driver,
if (device_is_registered(dev)) {
iface->condition = USB_INTERFACE_UNBINDING;
device_release_driver(dev);
+ } else {
+ iface->condition = USB_INTERFACE_UNBOUND;
+ usb_cancel_queued_reset(iface);
}
-
dev->driver = NULL;
usb_set_intfdata(iface, NULL);
@@ -904,7 +925,7 @@ static int usb_suspend_device(struct usb_device *udev, pm_message_t msg)
}
/* Caller has locked udev's pm_mutex */
-static int usb_resume_device(struct usb_device *udev)
+static int usb_resume_device(struct usb_device *udev, pm_message_t msg)
{
struct usb_device_driver *udriver;
int status = 0;
@@ -922,7 +943,7 @@ static int usb_resume_device(struct usb_device *udev)
udev->reset_resume = 1;
udriver = to_usb_device_driver(udev->dev.driver);
- status = udriver->resume(udev);
+ status = udriver->resume(udev, msg);
done:
dev_vdbg(&udev->dev, "%s: status %d\n", __func__, status);
@@ -942,7 +963,8 @@ static int usb_suspend_interface(struct usb_device *udev,
if (udev->state == USB_STATE_NOTATTACHED || !is_active(intf))
goto done;
- if (intf->condition == USB_INTERFACE_UNBOUND) /* This can't happen */
+ /* This can happen; see usb_driver_release_interface() */
+ if (intf->condition == USB_INTERFACE_UNBOUND)
goto done;
driver = to_usb_driver(intf->dev.driver);
@@ -950,7 +972,7 @@ static int usb_suspend_interface(struct usb_device *udev,
status = driver->suspend(intf, msg);
if (status == 0)
mark_quiesced(intf);
- else if (!udev->auto_pm)
+ else if (!(msg.event & PM_EVENT_AUTO))
dev_err(&intf->dev, "%s error %d\n",
"suspend", status);
} else {
@@ -968,7 +990,7 @@ static int usb_suspend_interface(struct usb_device *udev,
/* Caller has locked intf's usb_device's pm_mutex */
static int usb_resume_interface(struct usb_device *udev,
- struct usb_interface *intf, int reset_resume)
+ struct usb_interface *intf, pm_message_t msg, int reset_resume)
{
struct usb_driver *driver;
int status = 0;
@@ -1092,7 +1114,7 @@ static int autosuspend_check(struct usb_device *udev, int reschedule)
if (reschedule) {
if (!timer_pending(&udev->autosuspend.timer)) {
queue_delayed_work(ksuspend_usb_wq, &udev->autosuspend,
- round_jiffies_relative(suspend_time - j));
+ round_jiffies_up_relative(suspend_time - j));
}
return -EAGAIN;
}
@@ -1119,10 +1141,9 @@ static inline int autosuspend_check(struct usb_device *udev, int reschedule)
* all the interfaces which were suspended are resumed so that they remain
* in the same state as the device.
*
- * If an autosuspend is in progress (@udev->auto_pm is set), the routine
- * checks first to make sure that neither the device itself or any of its
- * active interfaces is in use (pm_usage_cnt is greater than 0). If they
- * are, the autosuspend fails.
+ * If an autosuspend is in progress the routine checks first to make sure
+ * that neither the device itself or any of its active interfaces is in use
+ * (pm_usage_cnt is greater than 0). If they are, the autosuspend fails.
*
* If the suspend succeeds, the routine recursively queues an autosuspend
* request for @udev's parent device, thereby propagating the change up
@@ -1157,7 +1178,7 @@ static int usb_suspend_both(struct usb_device *udev, pm_message_t msg)
udev->do_remote_wakeup = device_may_wakeup(&udev->dev);
- if (udev->auto_pm) {
+ if (msg.event & PM_EVENT_AUTO) {
status = autosuspend_check(udev, 0);
if (status < 0)
goto done;
@@ -1177,13 +1198,16 @@ static int usb_suspend_both(struct usb_device *udev, pm_message_t msg)
/* If the suspend failed, resume interfaces that did get suspended */
if (status != 0) {
+ pm_message_t msg2;
+
+ msg2.event = msg.event ^ (PM_EVENT_SUSPEND | PM_EVENT_RESUME);
while (--i >= 0) {
intf = udev->actconfig->interface[i];
- usb_resume_interface(udev, intf, 0);
+ usb_resume_interface(udev, intf, msg2, 0);
}
/* Try another autosuspend when the interfaces aren't busy */
- if (udev->auto_pm)
+ if (msg.event & PM_EVENT_AUTO)
autosuspend_check(udev, status == -EBUSY);
/* If the suspend succeeded then prevent any more URB submissions,
@@ -1213,6 +1237,7 @@ static int usb_suspend_both(struct usb_device *udev, pm_message_t msg)
/**
* usb_resume_both - resume a USB device and its interfaces
* @udev: the usb_device to resume
+ * @msg: Power Management message describing this state transition
*
* This is the central routine for resuming USB devices. It calls the
 * resume method for @udev and then calls the resume methods for all
@@ -1238,7 +1263,7 @@ static int usb_suspend_both(struct usb_device *udev, pm_message_t msg)
*
* This routine can run only in process context.
*/
-static int usb_resume_both(struct usb_device *udev)
+static int usb_resume_both(struct usb_device *udev, pm_message_t msg)
{
int status = 0;
int i;
@@ -1254,14 +1279,15 @@ static int usb_resume_both(struct usb_device *udev)
/* Propagate the resume up the tree, if necessary */
if (udev->state == USB_STATE_SUSPENDED) {
- if (udev->auto_pm && udev->autoresume_disabled) {
+ if ((msg.event & PM_EVENT_AUTO) &&
+ udev->autoresume_disabled) {
status = -EPERM;
goto done;
}
if (parent) {
status = usb_autoresume_device(parent);
if (status == 0) {
- status = usb_resume_device(udev);
+ status = usb_resume_device(udev, msg);
if (status || udev->state ==
USB_STATE_NOTATTACHED) {
usb_autosuspend_device(parent);
@@ -1284,15 +1310,16 @@ static int usb_resume_both(struct usb_device *udev)
/* We can't propagate beyond the USB subsystem,
* so if a root hub's controller is suspended
* then we're stuck. */
- status = usb_resume_device(udev);
+ status = usb_resume_device(udev, msg);
}
} else if (udev->reset_resume)
- status = usb_resume_device(udev);
+ status = usb_resume_device(udev, msg);
if (status == 0 && udev->actconfig) {
for (i = 0; i < udev->actconfig->desc.bNumInterfaces; i++) {
intf = udev->actconfig->interface[i];
- usb_resume_interface(udev, intf, udev->reset_resume);
+ usb_resume_interface(udev, intf, msg,
+ udev->reset_resume);
}
}
@@ -1320,13 +1347,13 @@ static int usb_autopm_do_device(struct usb_device *udev, int inc_usage_cnt)
udev->last_busy = jiffies;
if (inc_usage_cnt >= 0 && udev->pm_usage_cnt > 0) {
if (udev->state == USB_STATE_SUSPENDED)
- status = usb_resume_both(udev);
+ status = usb_resume_both(udev, PMSG_AUTO_RESUME);
if (status != 0)
udev->pm_usage_cnt -= inc_usage_cnt;
else if (inc_usage_cnt)
udev->last_busy = jiffies;
} else if (inc_usage_cnt <= 0 && udev->pm_usage_cnt <= 0) {
- status = usb_suspend_both(udev, PMSG_SUSPEND);
+ status = usb_suspend_both(udev, PMSG_AUTO_SUSPEND);
}
usb_pm_unlock(udev);
return status;
@@ -1341,6 +1368,19 @@ void usb_autosuspend_work(struct work_struct *work)
usb_autopm_do_device(udev, 0);
}
+/* usb_autoresume_work - callback routine to autoresume a USB device */
+void usb_autoresume_work(struct work_struct *work)
+{
+ struct usb_device *udev =
+ container_of(work, struct usb_device, autoresume);
+
+ /* Wake it up, let the drivers do their thing, and then put it
+ * back to sleep.
+ */
+ if (usb_autopm_do_device(udev, 1) == 0)
+ usb_autopm_do_device(udev, -1);
+}
+
/**
* usb_autosuspend_device - delayed autosuspend of a USB device and its interfaces
* @udev: the usb_device to autosuspend
@@ -1437,13 +1477,14 @@ static int usb_autopm_do_interface(struct usb_interface *intf,
udev->last_busy = jiffies;
if (inc_usage_cnt >= 0 && intf->pm_usage_cnt > 0) {
if (udev->state == USB_STATE_SUSPENDED)
- status = usb_resume_both(udev);
+ status = usb_resume_both(udev,
+ PMSG_AUTO_RESUME);
if (status != 0)
intf->pm_usage_cnt -= inc_usage_cnt;
else
udev->last_busy = jiffies;
} else if (inc_usage_cnt <= 0 && intf->pm_usage_cnt <= 0) {
- status = usb_suspend_both(udev, PMSG_SUSPEND);
+ status = usb_suspend_both(udev, PMSG_AUTO_SUSPEND);
}
}
usb_pm_unlock(udev);
@@ -1491,6 +1532,45 @@ void usb_autopm_put_interface(struct usb_interface *intf)
}
EXPORT_SYMBOL_GPL(usb_autopm_put_interface);
+/**
+ * usb_autopm_put_interface_async - decrement a USB interface's PM-usage counter
+ * @intf: the usb_interface whose counter should be decremented
+ *
+ * This routine does essentially the same thing as
+ * usb_autopm_put_interface(): it decrements @intf's usage counter and
+ * queues a delayed autosuspend request if the counter is <= 0. The
+ * difference is that it does not acquire the device's pm_mutex;
+ * callers must handle all synchronization issues themselves.
+ *
+ * Typically a driver would call this routine during an URB's completion
+ * handler, if no more URBs were pending.
+ *
+ * This routine can run in atomic context.
+ */
+void usb_autopm_put_interface_async(struct usb_interface *intf)
+{
+ struct usb_device *udev = interface_to_usbdev(intf);
+ int status = 0;
+
+ if (intf->condition == USB_INTERFACE_UNBOUND) {
+ status = -ENODEV;
+ } else {
+ udev->last_busy = jiffies;
+ --intf->pm_usage_cnt;
+ if (udev->autosuspend_disabled || udev->autosuspend_delay < 0)
+ status = -EPERM;
+ else if (intf->pm_usage_cnt <= 0 &&
+ !timer_pending(&udev->autosuspend.timer)) {
+ queue_delayed_work(ksuspend_usb_wq, &udev->autosuspend,
+ round_jiffies_up_relative(
+ udev->autosuspend_delay));
+ }
+ }
+ dev_vdbg(&intf->dev, "%s: status %d cnt %d\n",
+ __func__, status, intf->pm_usage_cnt);
+}
+EXPORT_SYMBOL_GPL(usb_autopm_put_interface_async);
+
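For illustration only (this block is an editorial sketch, not part of the patch): the typical caller described above is an URB completion handler that wants to drop its PM reference without sleeping. The callback name is hypothetical and urb->context is assumed to hold the interface pointer:

static void example_write_bulk_callback(struct urb *urb)
{
	struct usb_interface *intf = urb->context;

	/* ... check urb->status, reclaim the transfer buffer ... */

	/* Last outstanding URB is done; drop the PM reference.  Legal
	 * here because no mutex is taken: only the counter is touched
	 * and a delayed autosuspend request is queued. */
	usb_autopm_put_interface_async(intf);
}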
/**
* usb_autopm_get_interface - increment a USB interface's PM-usage counter
* @intf: the usb_interface whose counter should be incremented
@@ -1536,6 +1616,37 @@ int usb_autopm_get_interface(struct usb_interface *intf)
}
EXPORT_SYMBOL_GPL(usb_autopm_get_interface);
+/**
+ * usb_autopm_get_interface_async - increment a USB interface's PM-usage counter
+ * @intf: the usb_interface whose counter should be incremented
+ *
+ * This routine does much the same thing as
+ * usb_autopm_get_interface(): it increments @intf's usage counter and
+ * queues an autoresume request if the result is > 0. The differences
+ * are that it does not acquire the device's pm_mutex (callers must
+ * handle all synchronization issues themselves), and it does not
+ * autoresume the device directly (it only queues a request). After a
+ * successful call, the device will generally not yet be resumed.
+ *
+ * This routine can run in atomic context.
+ */
+int usb_autopm_get_interface_async(struct usb_interface *intf)
+{
+ struct usb_device *udev = interface_to_usbdev(intf);
+ int status = 0;
+
+ if (intf->condition == USB_INTERFACE_UNBOUND)
+ status = -ENODEV;
+ else if (udev->autoresume_disabled)
+ status = -EPERM;
+ else if (++intf->pm_usage_cnt > 0 && udev->state == USB_STATE_SUSPENDED)
+ queue_work(ksuspend_usb_wq, &udev->autoresume);
+ dev_vdbg(&intf->dev, "%s: status %d cnt %d\n",
+ __func__, status, intf->pm_usage_cnt);
+ return status;
+}
+EXPORT_SYMBOL_GPL(usb_autopm_get_interface_async);
+
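Again an editorial sketch rather than code from the patch: because the asynchronous "get" only queues a resume, a driver typically records that I/O is pending and performs the actual submission from its resume() method once the device is awake. All "example" names are made up:

#define EXAMPLE_TX_PENDING	0

struct example_port {
	struct usb_interface	*intf;
	unsigned long		flags;
};

static int example_wakeup_for_tx(struct example_port *port)
{
	int rv = usb_autopm_get_interface_async(port->intf);

	if (rv < 0)
		return rv;	/* unbound, or autoresume disabled */

	/* Only a resume has been *queued*; the device may still be
	 * suspended here, so just mark the I/O as pending and let the
	 * driver's resume() method submit it. */
	set_bit(EXAMPLE_TX_PENDING, &port->flags);
	return 0;
}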
/**
* usb_autopm_set_interface - set a USB interface's autosuspend state
* @intf: the usb_interface whose state should be set
@@ -1563,6 +1674,9 @@ EXPORT_SYMBOL_GPL(usb_autopm_set_interface);
void usb_autosuspend_work(struct work_struct *work)
{}
+void usb_autoresume_work(struct work_struct *work)
+{}
+
#endif /* CONFIG_USB_SUSPEND */
/**
@@ -1595,6 +1709,7 @@ int usb_external_suspend_device(struct usb_device *udev, pm_message_t msg)
/**
* usb_external_resume_device - external resume of a USB device and its interfaces
* @udev: the usb_device to resume
+ * @msg: Power Management message describing this state transition
*
* This routine handles external resume requests: ones not generated
* internally by a USB driver (autoresume) but rather coming from the user
@@ -1603,13 +1718,13 @@ int usb_external_suspend_device(struct usb_device *udev, pm_message_t msg)
*
* The caller must hold @udev's device lock.
*/
-int usb_external_resume_device(struct usb_device *udev)
+int usb_external_resume_device(struct usb_device *udev, pm_message_t msg)
{
int status;
usb_pm_lock(udev);
udev->auto_pm = 0;
- status = usb_resume_both(udev);
+ status = usb_resume_both(udev, msg);
udev->last_busy = jiffies;
usb_pm_unlock(udev);
if (status == 0)
@@ -1622,7 +1737,7 @@ int usb_external_resume_device(struct usb_device *udev)
return status;
}
-int usb_suspend(struct device *dev, pm_message_t message)
+int usb_suspend(struct device *dev, pm_message_t msg)
{
struct usb_device *udev;
@@ -1641,10 +1756,10 @@ int usb_suspend(struct device *dev, pm_message_t message)
}
udev->skip_sys_resume = 0;
- return usb_external_suspend_device(udev, message);
+ return usb_external_suspend_device(udev, msg);
}
-int usb_resume(struct device *dev)
+int usb_resume(struct device *dev, pm_message_t msg)
{
struct usb_device *udev;
@@ -1656,7 +1771,7 @@ int usb_resume(struct device *dev)
*/
if (udev->skip_sys_resume)
return 0;
- return usb_external_resume_device(udev);
+ return usb_external_resume_device(udev, msg);
}
#endif /* CONFIG_PM */
diff --git a/drivers/usb/core/endpoint.c b/drivers/usb/core/endpoint.c
index 946fae43d62..e1710f260b4 100644
--- a/drivers/usb/core/endpoint.c
+++ b/drivers/usb/core/endpoint.c
@@ -276,7 +276,7 @@ static void ep_device_release(struct device *dev)
kfree(ep_dev);
}
-int usb_create_ep_files(struct device *parent,
+int usb_create_ep_devs(struct device *parent,
struct usb_host_endpoint *endpoint,
struct usb_device *udev)
{
@@ -340,7 +340,7 @@ exit:
return retval;
}
-void usb_remove_ep_files(struct usb_host_endpoint *endpoint)
+void usb_remove_ep_devs(struct usb_host_endpoint *endpoint)
{
struct ep_device *ep_dev = endpoint->ep_dev;
diff --git a/drivers/usb/core/generic.c b/drivers/usb/core/generic.c
index 7e912f21fd3..30ecac3af15 100644
--- a/drivers/usb/core/generic.c
+++ b/drivers/usb/core/generic.c
@@ -200,18 +200,18 @@ static int generic_suspend(struct usb_device *udev, pm_message_t msg)
* interfaces manually by doing a bus (or "global") suspend.
*/
if (!udev->parent)
- rc = hcd_bus_suspend(udev);
+ rc = hcd_bus_suspend(udev, msg);
/* Non-root devices don't need to do anything for FREEZE or PRETHAW */
else if (msg.event == PM_EVENT_FREEZE || msg.event == PM_EVENT_PRETHAW)
rc = 0;
else
- rc = usb_port_suspend(udev);
+ rc = usb_port_suspend(udev, msg);
return rc;
}
-static int generic_resume(struct usb_device *udev)
+static int generic_resume(struct usb_device *udev, pm_message_t msg)
{
int rc;
@@ -221,9 +221,9 @@ static int generic_resume(struct usb_device *udev)
* interfaces manually by doing a bus (or "global") resume.
*/
if (!udev->parent)
- rc = hcd_bus_resume(udev);
+ rc = hcd_bus_resume(udev, msg);
else
- rc = usb_port_resume(udev);
+ rc = usb_port_resume(udev, msg);
return rc;
}
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index 5b87ae7f0a6..507741ed448 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -128,6 +128,7 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
}
pci_set_master(dev);
+ device_set_wakeup_enable(&dev->dev, 1);
retval = usb_add_hcd(hcd, dev->irq, IRQF_DISABLED | IRQF_SHARED);
if (retval != 0)
@@ -191,17 +192,15 @@ EXPORT_SYMBOL_GPL(usb_hcd_pci_remove);
/**
* usb_hcd_pci_suspend - power management suspend of a PCI-based HCD
* @dev: USB Host Controller being suspended
- * @message: semantics in flux
+ * @message: Power Management message describing this state transition
*
- * Store this function in the HCD's struct pci_driver as suspend().
+ * Store this function in the HCD's struct pci_driver as .suspend.
*/
int usb_hcd_pci_suspend(struct pci_dev *dev, pm_message_t message)
{
- struct usb_hcd *hcd;
+ struct usb_hcd *hcd = pci_get_drvdata(dev);
int retval = 0;
- int has_pci_pm;
-
- hcd = pci_get_drvdata(dev);
+ int wake, w;
/* Root hub suspend should have stopped all downstream traffic,
* and all bus master traffic. And done so for both the interface
@@ -212,8 +211,15 @@ int usb_hcd_pci_suspend(struct pci_dev *dev, pm_message_t message)
* otherwise the swsusp will save (and restore) garbage state.
*/
if (!(hcd->state == HC_STATE_SUSPENDED ||
- hcd->state == HC_STATE_HALT))
- return -EBUSY;
+ hcd->state == HC_STATE_HALT)) {
+ dev_warn(&dev->dev, "Root hub is not suspended\n");
+ retval = -EBUSY;
+ goto done;
+ }
+
+ /* We might already be suspended (runtime PM -- not yet written) */
+ if (dev->current_state != PCI_D0)
+ goto done;
if (hcd->driver->pci_suspend) {
retval = hcd->driver->pci_suspend(hcd, message);
@@ -221,49 +227,60 @@ int usb_hcd_pci_suspend(struct pci_dev *dev, pm_message_t message)
if (retval)
goto done;
}
+
synchronize_irq(dev->irq);
- /* FIXME until the generic PM interfaces change a lot more, this
- * can't use PCI D1 and D2 states. For example, the confusion
- * between messages and states will need to vanish, and messages
- * will need to provide a target system state again.
- *
- * It'll be important to learn characteristics of the target state,
- * especially on embedded hardware where the HCD will often be in
- * charge of an external VBUS power supply and one or more clocks.
- * Some target system states will leave them active; others won't.
- * (With PCI, that's often handled by platform BIOS code.)
+ /* Don't fail on error to enable wakeup. We rely on pci code
+ * to reject requests the hardware can't implement, rather
+ * than coding the same thing.
*/
-
- /* even when the PCI layer rejects some of the PCI calls
- * below, HCs can try global suspend and reduce DMA traffic.
- * PM-sensitive HCDs may already have done this.
- */
- has_pci_pm = pci_find_capability(dev, PCI_CAP_ID_PM);
+ wake = (hcd->state == HC_STATE_SUSPENDED &&
+ device_may_wakeup(&dev->dev));
+ w = pci_wake_from_d3(dev, wake);
+ if (w < 0)
+ wake = w;
+ dev_dbg(&dev->dev, "wakeup: %d\n", wake);
/* Downstream ports from this root hub should already be quiesced, so
* there will be no DMA activity. Now we can shut down the upstream
* link (except maybe for PME# resume signaling) and enter some PCI
* low power state, if the hardware allows.
*/
- if (hcd->state == HC_STATE_SUSPENDED) {
+ pci_disable_device(dev);
+ done:
+ return retval;
+}
+EXPORT_SYMBOL_GPL(usb_hcd_pci_suspend);
- /* no DMA or IRQs except when HC is active */
- if (dev->current_state == PCI_D0) {
- pci_save_state(dev);
- pci_disable_device(dev);
- }
+/**
+ * usb_hcd_pci_suspend_late - suspend a PCI-based HCD after IRQs are disabled
+ * @dev: USB Host Controller being suspended
+ * @message: Power Management message describing this state transition
+ *
+ * Store this function in the HCD's struct pci_driver as .suspend_late.
+ */
+int usb_hcd_pci_suspend_late(struct pci_dev *dev, pm_message_t message)
+{
+ int retval = 0;
+ int has_pci_pm;
- if (message.event == PM_EVENT_FREEZE ||
- message.event == PM_EVENT_PRETHAW) {
- dev_dbg(hcd->self.controller, "--> no state change\n");
- goto done;
- }
+ /* We might already be suspended (runtime PM -- not yet written) */
+ if (dev->current_state != PCI_D0)
+ goto done;
- if (!has_pci_pm) {
- dev_dbg(hcd->self.controller, "--> PCI D0/legacy\n");
- goto done;
- }
+ pci_save_state(dev);
+
+ /* Don't change state if we don't need to */
+ if (message.event == PM_EVENT_FREEZE ||
+ message.event == PM_EVENT_PRETHAW) {
+ dev_dbg(&dev->dev, "--> no state change\n");
+ goto done;
+ }
+
+ has_pci_pm = pci_find_capability(dev, PCI_CAP_ID_PM);
+ if (!has_pci_pm) {
+ dev_dbg(&dev->dev, "--> PCI D0 legacy\n");
+ } else {
/* NOTE: dev->current_state becomes nonzero only here, and
* only for devices that support PCI PM. Also, exiting
@@ -273,35 +290,16 @@ int usb_hcd_pci_suspend(struct pci_dev *dev, pm_message_t message)
retval = pci_set_power_state(dev, PCI_D3hot);
suspend_report_result(pci_set_power_state, retval);
if (retval == 0) {
- int wake = device_can_wakeup(&hcd->self.root_hub->dev);
-
- wake = wake && device_may_wakeup(hcd->self.controller);
-
- dev_dbg(hcd->self.controller, "--> PCI D3%s\n",
- wake ? "/wakeup" : "");
-
- /* Ignore these return values. We rely on pci code to
- * reject requests the hardware can't implement, rather
- * than coding the same thing.
- */
- (void) pci_enable_wake(dev, PCI_D3hot, wake);
- (void) pci_enable_wake(dev, PCI_D3cold, wake);
+ dev_dbg(&dev->dev, "--> PCI D3\n");
} else {
dev_dbg(&dev->dev, "PCI D3 suspend fail, %d\n",
retval);
- (void) usb_hcd_pci_resume(dev);
+ pci_restore_state(dev);
}
-
- } else if (hcd->state != HC_STATE_HALT) {
- dev_dbg(hcd->self.controller, "hcd state %d; not suspended\n",
- hcd->state);
- WARN_ON(1);
- retval = -EINVAL;
}
-done:
- if (retval == 0) {
#ifdef CONFIG_PPC_PMAC
+ if (retval == 0) {
/* Disable ASIC clocks for USB */
if (machine_is(powermac)) {
struct device_node *of_node;
@@ -311,30 +309,24 @@ done:
pmac_call_feature(PMAC_FTR_USB_ENABLE,
of_node, 0, 0);
}
-#endif
}
+#endif
+ done:
return retval;
}
-EXPORT_SYMBOL_GPL(usb_hcd_pci_suspend);
+EXPORT_SYMBOL_GPL(usb_hcd_pci_suspend_late);
/**
- * usb_hcd_pci_resume - power management resume of a PCI-based HCD
+ * usb_hcd_pci_resume_early - resume a PCI-based HCD before IRQs are enabled
* @dev: USB Host Controller being resumed
*
- * Store this function in the HCD's struct pci_driver as resume().
+ * Store this function in the HCD's struct pci_driver as .resume_early.
*/
-int usb_hcd_pci_resume(struct pci_dev *dev)
+int usb_hcd_pci_resume_early(struct pci_dev *dev)
{
- struct usb_hcd *hcd;
- int retval;
-
- hcd = pci_get_drvdata(dev);
- if (hcd->state != HC_STATE_SUSPENDED) {
- dev_dbg(hcd->self.controller,
- "can't resume, not suspended!\n");
- return 0;
- }
+ int retval = 0;
+ pci_power_t state = dev->current_state;
#ifdef CONFIG_PPC_PMAC
/* Reenable ASIC clocks for USB */
@@ -352,7 +344,7 @@ int usb_hcd_pci_resume(struct pci_dev *dev)
* calls "standby", "suspend to RAM", and so on). There are also
* dirty cases when swsusp fakes a suspend in "shutdown" mode.
*/
- if (dev->current_state != PCI_D0) {
+ if (state != PCI_D0) {
#ifdef DEBUG
int pci_pm;
u16 pmcr;
@@ -364,8 +356,7 @@ int usb_hcd_pci_resume(struct pci_dev *dev)
/* Clean case: power to USB and to HC registers was
* maintained; remote wakeup is easy.
*/
- dev_dbg(hcd->self.controller, "resume from PCI D%d\n",
- pmcr);
+ dev_dbg(&dev->dev, "resume from PCI D%d\n", pmcr);
} else {
/* Clean: HC lost Vcc power, D0 uninitialized
* + Vaux may have preserved port and transceiver
@@ -376,32 +367,55 @@ int usb_hcd_pci_resume(struct pci_dev *dev)
* + after BIOS init
* + after Linux init (HCD statically linked)
*/
- dev_dbg(hcd->self.controller,
- "PCI D0, from previous PCI D%d\n",
- dev->current_state);
+ dev_dbg(&dev->dev, "resume from previous PCI D%d\n",
+ state);
}
#endif
- /* yes, ignore these results too... */
- (void) pci_enable_wake(dev, dev->current_state, 0);
- (void) pci_enable_wake(dev, PCI_D3cold, 0);
+
+ retval = pci_set_power_state(dev, PCI_D0);
} else {
/* Same basic cases: clean (powered/not), dirty */
- dev_dbg(hcd->self.controller, "PCI legacy resume\n");
+ dev_dbg(&dev->dev, "PCI legacy resume\n");
+ }
+
+ if (retval < 0)
+ dev_err(&dev->dev, "can't resume: %d\n", retval);
+ else
+ pci_restore_state(dev);
+
+ return retval;
+}
+EXPORT_SYMBOL_GPL(usb_hcd_pci_resume_early);
+
+/**
+ * usb_hcd_pci_resume - power management resume of a PCI-based HCD
+ * @dev: USB Host Controller being resumed
+ *
+ * Store this function in the HCD's struct pci_driver as .resume.
+ */
+int usb_hcd_pci_resume(struct pci_dev *dev)
+{
+ struct usb_hcd *hcd;
+ int retval;
+
+ hcd = pci_get_drvdata(dev);
+ if (hcd->state != HC_STATE_SUSPENDED) {
+ dev_dbg(hcd->self.controller,
+ "can't resume, not suspended!\n");
+ return 0;
}
- /* NOTE: the PCI API itself is asymmetric here. We don't need to
- * pci_set_power_state(PCI_D0) since that's part of re-enabling;
- * but that won't re-enable bus mastering. Yet pci_disable_device()
- * explicitly disables bus mastering...
- */
retval = pci_enable_device(dev);
if (retval < 0) {
- dev_err(hcd->self.controller,
- "can't re-enable after resume, %d!\n", retval);
+ dev_err(&dev->dev, "can't re-enable after resume, %d!\n",
+ retval);
return retval;
}
+
pci_set_master(dev);
- pci_restore_state(dev);
+
+ /* yes, ignore this result too... */
+ (void) pci_wake_from_d3(dev, 0);
clear_bit(HCD_FLAG_SAW_IRQ, &hcd->flags);
@@ -413,7 +427,6 @@ int usb_hcd_pci_resume(struct pci_dev *dev)
usb_hc_died(hcd);
}
}
-
return retval;
}
EXPORT_SYMBOL_GPL(usb_hcd_pci_resume);
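To show how the four entry points above fit together (an editorial sketch, not part of the patch; the "example" driver, its hc_driver and the PCI class match are made up), a PCI-based HCD would wire them into its struct pci_driver roughly like this:

static const struct hc_driver example_hc_driver = {
	.description	= "example-hcd",
	/* ... the usual hc_driver operations ... */
};

static const struct pci_device_id example_pci_ids[] = {
	{ PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_EHCI, ~0),
	  .driver_data = (unsigned long) &example_hc_driver, },
	{ /* end: all zeroes */ }
};
MODULE_DEVICE_TABLE(pci, example_pci_ids);

static struct pci_driver example_hcd_pci_driver = {
	.name		= "example-hcd",
	.id_table	= example_pci_ids,
	.probe		= usb_hcd_pci_probe,
	.remove		= usb_hcd_pci_remove,
#ifdef CONFIG_PM
	.suspend	= usb_hcd_pci_suspend,		/* IRQs still enabled */
	.suspend_late	= usb_hcd_pci_suspend_late,	/* after IRQs are disabled */
	.resume_early	= usb_hcd_pci_resume_early,	/* before IRQs are re-enabled */
	.resume		= usb_hcd_pci_resume,
#endif
};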
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index e1b42626d04..3c711db55d8 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1010,7 +1010,7 @@ int usb_hcd_link_urb_to_ep(struct usb_hcd *hcd, struct urb *urb)
spin_lock(&hcd_urb_list_lock);
/* Check that the URB isn't being killed */
- if (unlikely(urb->reject)) {
+ if (unlikely(atomic_read(&urb->reject))) {
rc = -EPERM;
goto done;
}
@@ -1340,7 +1340,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
INIT_LIST_HEAD(&urb->urb_list);
atomic_dec(&urb->use_count);
atomic_dec(&urb->dev->urbnum);
- if (urb->reject)
+ if (atomic_read(&urb->reject))
wake_up(&usb_kill_urb_queue);
usb_put_urb(urb);
}
@@ -1444,7 +1444,7 @@ void usb_hcd_giveback_urb(struct usb_hcd *hcd, struct urb *urb, int status)
urb->status = status;
urb->complete (urb);
atomic_dec (&urb->use_count);
- if (unlikely (urb->reject))
+ if (unlikely(atomic_read(&urb->reject)))
wake_up (&usb_kill_urb_queue);
usb_put_urb (urb);
}
@@ -1573,14 +1573,14 @@ int usb_hcd_get_frame_number (struct usb_device *udev)
#ifdef CONFIG_PM
-int hcd_bus_suspend(struct usb_device *rhdev)
+int hcd_bus_suspend(struct usb_device *rhdev, pm_message_t msg)
{
struct usb_hcd *hcd = container_of(rhdev->bus, struct usb_hcd, self);
int status;
int old_state = hcd->state;
dev_dbg(&rhdev->dev, "bus %s%s\n",
- rhdev->auto_pm ? "auto-" : "", "suspend");
+ (msg.event & PM_EVENT_AUTO ? "auto-" : ""), "suspend");
if (!hcd->driver->bus_suspend) {
status = -ENOENT;
} else {
@@ -1598,14 +1598,14 @@ int hcd_bus_suspend(struct usb_device *rhdev)
return status;
}
-int hcd_bus_resume(struct usb_device *rhdev)
+int hcd_bus_resume(struct usb_device *rhdev, pm_message_t msg)
{
struct usb_hcd *hcd = container_of(rhdev->bus, struct usb_hcd, self);
int status;
int old_state = hcd->state;
dev_dbg(&rhdev->dev, "usb %s%s\n",
- rhdev->auto_pm ? "auto-" : "", "resume");
+ (msg.event & PM_EVENT_AUTO ? "auto-" : ""), "resume");
if (!hcd->driver->bus_resume)
return -ENOENT;
if (hcd->state == HC_STATE_RUNNING)
@@ -1638,7 +1638,7 @@ static void hcd_resume_work(struct work_struct *work)
usb_lock_device(udev);
usb_mark_last_busy(udev);
- usb_external_resume_device(udev);
+ usb_external_resume_device(udev, PMSG_REMOTE_RESUME);
usb_unlock_device(udev);
}
@@ -2028,7 +2028,7 @@ EXPORT_SYMBOL_GPL(usb_hcd_platform_shutdown);
/*-------------------------------------------------------------------------*/
-#if defined(CONFIG_USB_MON)
+#if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
struct usb_mon_operations *mon_ops;
@@ -2064,4 +2064,4 @@ void usb_mon_deregister (void)
}
EXPORT_SYMBOL_GPL (usb_mon_deregister);
-#endif /* CONFIG_USB_MON */
+#endif /* CONFIG_USB_MON || CONFIG_USB_MON_MODULE */
diff --git a/drivers/usb/core/hcd.h b/drivers/usb/core/hcd.h
index 9465e70f4dd..572d2cf46e8 100644
--- a/drivers/usb/core/hcd.h
+++ b/drivers/usb/core/hcd.h
@@ -16,6 +16,8 @@
* Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#ifndef __USB_CORE_HCD_H
+#define __USB_CORE_HCD_H
#ifdef __KERNEL__
@@ -254,7 +256,9 @@ extern int usb_hcd_pci_probe(struct pci_dev *dev,
extern void usb_hcd_pci_remove(struct pci_dev *dev);
#ifdef CONFIG_PM
-extern int usb_hcd_pci_suspend(struct pci_dev *dev, pm_message_t state);
+extern int usb_hcd_pci_suspend(struct pci_dev *dev, pm_message_t msg);
+extern int usb_hcd_pci_suspend_late(struct pci_dev *dev, pm_message_t msg);
+extern int usb_hcd_pci_resume_early(struct pci_dev *dev);
extern int usb_hcd_pci_resume(struct pci_dev *dev);
#endif /* CONFIG_PM */
@@ -386,8 +390,8 @@ extern int usb_find_interface_driver(struct usb_device *dev,
#ifdef CONFIG_PM
extern void usb_hcd_resume_root_hub(struct usb_hcd *hcd);
extern void usb_root_hub_lost_power(struct usb_device *rhdev);
-extern int hcd_bus_suspend(struct usb_device *rhdev);
-extern int hcd_bus_resume(struct usb_device *rhdev);
+extern int hcd_bus_suspend(struct usb_device *rhdev, pm_message_t msg);
+extern int hcd_bus_resume(struct usb_device *rhdev, pm_message_t msg);
#else
static inline void usb_hcd_resume_root_hub(struct usb_hcd *hcd)
{
@@ -419,7 +423,7 @@ static inline void usbfs_cleanup(void) { }
/*-------------------------------------------------------------------------*/
-#if defined(CONFIG_USB_MON)
+#if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
struct usb_mon_operations {
void (*urb_submit)(struct usb_bus *bus, struct urb *urb);
@@ -461,7 +465,7 @@ static inline void usbmon_urb_submit_error(struct usb_bus *bus, struct urb *urb,
static inline void usbmon_urb_complete(struct usb_bus *bus, struct urb *urb,
int status) {}
-#endif /* CONFIG_USB_MON */
+#endif /* CONFIG_USB_MON || CONFIG_USB_MON_MODULE */
/*-------------------------------------------------------------------------*/
@@ -490,3 +494,5 @@ extern struct rw_semaphore ehci_cf_port_reset_rwsem;
extern unsigned long usb_hcds_loaded;
#endif /* __KERNEL__ */
+
+#endif /* __USB_CORE_HCD_H */
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index b19cbfcd51d..d5d0e40b1e2 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -107,7 +107,9 @@ MODULE_PARM_DESC (blinkenlights, "true to cycle leds on hubs");
/* define initial 64-byte descriptor request timeout in milliseconds */
static int initial_descriptor_timeout = USB_CTRL_GET_TIMEOUT;
module_param(initial_descriptor_timeout, int, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(initial_descriptor_timeout, "initial 64-byte descriptor request timeout in milliseconds (default 5000 - 5.0 seconds)");
+MODULE_PARM_DESC(initial_descriptor_timeout,
+ "initial 64-byte descriptor request timeout in milliseconds "
+ "(default 5000 - 5.0 seconds)");
/*
* As of 2.6.10 we introduce a new USB device initialization scheme which
@@ -1136,8 +1138,8 @@ static int hub_probe(struct usb_interface *intf, const struct usb_device_id *id)
hdev = interface_to_usbdev(intf);
if (hdev->level == MAX_TOPO_LEVEL) {
- dev_err(&intf->dev, "Unsupported bus topology: "
- "hub nested too deep\n");
+ dev_err(&intf->dev,
+ "Unsupported bus topology: hub nested too deep\n");
return -E2BIG;
}
@@ -1374,8 +1376,9 @@ static void usb_stop_pm(struct usb_device *udev)
usb_autosuspend_device(udev->parent);
usb_pm_unlock(udev);
- /* Stop any autosuspend requests already submitted */
- cancel_rearming_delayed_work(&udev->autosuspend);
+ /* Stop any autosuspend or autoresume requests already submitted */
+ cancel_delayed_work_sync(&udev->autosuspend);
+ cancel_work_sync(&udev->autoresume);
}
#else
@@ -1434,17 +1437,12 @@ void usb_disconnect(struct usb_device **pdev)
usb_disable_device(udev, 0);
usb_hcd_synchronize_unlinks(udev);
+ usb_remove_ep_devs(&udev->ep0);
usb_unlock_device(udev);
- /* Remove the device-specific files from sysfs. This must be
- * done with udev unlocked, because some of the attribute
- * routines try to acquire the device lock.
- */
- usb_remove_sysfs_dev_files(udev);
-
/* Unregister the device. The device driver is responsible
- * for removing the device files from usbfs and sysfs and for
- * de-configuring the device.
+ * for de-configuring the device and invoking the remove-device
+ * notifier chain (used by usbfs and possibly others).
*/
device_del(&udev->dev);
@@ -1476,8 +1474,8 @@ static void announce_device(struct usb_device *udev)
dev_info(&udev->dev, "New USB device found, idVendor=%04x, idProduct=%04x\n",
le16_to_cpu(udev->descriptor.idVendor),
le16_to_cpu(udev->descriptor.idProduct));
- dev_info(&udev->dev, "New USB device strings: Mfr=%d, Product=%d, "
- "SerialNumber=%d\n",
+ dev_info(&udev->dev,
+ "New USB device strings: Mfr=%d, Product=%d, SerialNumber=%d\n",
udev->descriptor.iManufacturer,
udev->descriptor.iProduct,
udev->descriptor.iSerialNumber);
@@ -1542,7 +1540,7 @@ static int usb_configure_device_otg(struct usb_device *udev)
* customize to match your product.
*/
dev_info(&udev->dev,
- "can't set HNP mode; %d\n",
+ "can't set HNP mode: %d\n",
err);
bus->b_hnp_enable = 0;
}
@@ -1635,6 +1633,10 @@ int usb_new_device(struct usb_device *udev)
{
int err;
+ /* Increment the parent's count of unsuspended children */
+ if (udev->parent)
+ usb_autoresume_device(udev->parent);
+
usb_detect_quirks(udev); /* Determine quirks */
err = usb_configure_device(udev); /* detect & probe dev/intfs */
if (err < 0)
@@ -1643,13 +1645,12 @@ int usb_new_device(struct usb_device *udev)
udev->dev.devt = MKDEV(USB_DEVICE_MAJOR,
(((udev->bus->busnum-1) * 128) + (udev->devnum-1)));
- /* Increment the parent's count of unsuspended children */
- if (udev->parent)
- usb_autoresume_device(udev->parent);
+ /* Tell the world! */
+ announce_device(udev);
/* Register the device. The device driver is responsible
- * for adding the device files to sysfs and for configuring
- * the device.
+ * for configuring the device and invoking the add-device
+ * notifier chain (used by usbfs and possibly others).
*/
err = device_add(&udev->dev);
if (err) {
@@ -1657,15 +1658,12 @@ int usb_new_device(struct usb_device *udev)
goto fail;
}
- /* put device-specific files into sysfs */
- usb_create_sysfs_dev_files(udev);
-
- /* Tell the world! */
- announce_device(udev);
+ (void) usb_create_ep_devs(&udev->dev, &udev->ep0, udev);
return err;
fail:
usb_set_device_state(udev, USB_STATE_NOTATTACHED);
+ usb_stop_pm(udev);
return err;
}
@@ -1982,7 +1980,7 @@ static int check_port_resume_type(struct usb_device *udev,
*
* Returns 0 on success, else negative errno.
*/
-int usb_port_suspend(struct usb_device *udev)
+int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
{
struct usb_hub *hub = hdev_to_hub(udev->parent);
int port1 = udev->portnum;
@@ -2021,7 +2019,7 @@ int usb_port_suspend(struct usb_device *udev)
} else {
/* device has up to 10 msec to fully suspend */
dev_dbg(&udev->dev, "usb %ssuspend\n",
- udev->auto_pm ? "auto-" : "");
+ (msg.event & PM_EVENT_AUTO ? "auto-" : ""));
usb_set_device_state(udev, USB_STATE_SUSPENDED);
msleep(10);
}
@@ -2045,8 +2043,8 @@ static int finish_port_resume(struct usb_device *udev)
u16 devstatus;
/* caller owns the udev device lock */
- dev_dbg(&udev->dev, "finish %sresume\n",
- udev->reset_resume ? "reset-" : "");
+ dev_dbg(&udev->dev, "%s\n",
+ udev->reset_resume ? "finish reset-resume" : "finish resume");
/* usb ch9 identifies four variants of SUSPENDED, based on what
* state the device resumes to. Linux currently won't see the
@@ -2098,8 +2096,9 @@ static int finish_port_resume(struct usb_device *udev)
NULL, 0,
USB_CTRL_SET_TIMEOUT);
if (status)
- dev_dbg(&udev->dev, "disable remote "
- "wakeup, status %d\n", status);
+ dev_dbg(&udev->dev,
+ "disable remote wakeup, status %d\n",
+ status);
}
status = 0;
}
@@ -2140,7 +2139,7 @@ static int finish_port_resume(struct usb_device *udev)
*
* Returns 0 on success, else negative errno.
*/
-int usb_port_resume(struct usb_device *udev)
+int usb_port_resume(struct usb_device *udev, pm_message_t msg)
{
struct usb_hub *hub = hdev_to_hub(udev->parent);
int port1 = udev->portnum;
@@ -2165,7 +2164,7 @@ int usb_port_resume(struct usb_device *udev)
} else {
/* drive resume for at least 20 msec */
dev_dbg(&udev->dev, "usb %sresume\n",
- udev->auto_pm ? "auto-" : "");
+ (msg.event & PM_EVENT_AUTO ? "auto-" : ""));
msleep(25);
/* Virtual root hubs can trigger on GET_PORT_STATUS to
@@ -2206,7 +2205,7 @@ static int remote_wakeup(struct usb_device *udev)
if (udev->state == USB_STATE_SUSPENDED) {
dev_dbg(&udev->dev, "usb %sresume\n", "wakeup-");
usb_mark_last_busy(udev);
- status = usb_external_resume_device(udev);
+ status = usb_external_resume_device(udev, PMSG_REMOTE_RESUME);
}
return status;
}
@@ -2215,14 +2214,14 @@ static int remote_wakeup(struct usb_device *udev)
/* When CONFIG_USB_SUSPEND isn't set, we never suspend or resume any ports. */
-int usb_port_suspend(struct usb_device *udev)
+int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
{
return 0;
}
/* However we may need to do a reset-resume */
-int usb_port_resume(struct usb_device *udev)
+int usb_port_resume(struct usb_device *udev, pm_message_t msg)
{
struct usb_hub *hub = hdev_to_hub(udev->parent);
int port1 = udev->portnum;
@@ -2262,7 +2261,7 @@ static int hub_suspend(struct usb_interface *intf, pm_message_t msg)
udev = hdev->children [port1-1];
if (udev && udev->can_submit) {
- if (!hdev->auto_pm)
+ if (!(msg.event & PM_EVENT_AUTO))
dev_dbg(&intf->dev, "port %d nyet suspended\n",
port1);
return -EBUSY;
@@ -2385,7 +2384,7 @@ void usb_ep0_reinit(struct usb_device *udev)
{
usb_disable_endpoint(udev, 0 + USB_DIR_IN);
usb_disable_endpoint(udev, 0 + USB_DIR_OUT);
- usb_enable_endpoint(udev, &udev->ep0);
+ usb_enable_endpoint(udev, &udev->ep0, true);
}
EXPORT_SYMBOL_GPL(usb_ep0_reinit);
@@ -2582,9 +2581,9 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
goto fail;
}
if (r) {
- dev_err(&udev->dev, "device descriptor "
- "read/%s, error %d\n",
- "64", r);
+ dev_err(&udev->dev,
+ "device descriptor read/64, error %d\n",
+ r);
retval = -EMSGSIZE;
continue;
}
@@ -2621,9 +2620,9 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
retval = usb_get_device_descriptor(udev, 8);
if (retval < 8) {
- dev_err(&udev->dev, "device descriptor "
- "read/%s, error %d\n",
- "8", retval);
+ dev_err(&udev->dev,
+ "device descriptor read/8, error %d\n",
+ retval);
if (retval >= 0)
retval = -EMSGSIZE;
} else {
@@ -2650,8 +2649,8 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
retval = usb_get_device_descriptor(udev, USB_DT_DEVICE_SIZE);
if (retval < (signed)sizeof(udev->descriptor)) {
- dev_err(&udev->dev, "device descriptor read/%s, error %d\n",
- "all", retval);
+ dev_err(&udev->dev, "device descriptor read/all, error %d\n",
+ retval);
if (retval >= 0)
retval = -ENOMSG;
goto fail;
@@ -2719,9 +2718,9 @@ hub_power_remaining (struct usb_hub *hub)
else
delta = 8;
if (delta > hub->mA_per_port)
- dev_warn(&udev->dev, "%dmA is over %umA budget "
- "for port %d!\n",
- delta, hub->mA_per_port, port1);
+ dev_warn(&udev->dev,
+ "%dmA is over %umA budget for port %d!\n",
+ delta, hub->mA_per_port, port1);
remaining -= delta;
}
if (remaining < 0) {
@@ -3517,3 +3516,46 @@ int usb_reset_device(struct usb_device *udev)
return ret;
}
EXPORT_SYMBOL_GPL(usb_reset_device);
+
+
+/**
+ * usb_queue_reset_device - Reset a USB device from an atomic context
+ * @iface: USB interface belonging to the device to reset
+ *
+ * This function can be used to reset a USB device from an atomic
+ * context, where usb_reset_device() won't work (as it blocks).
+ *
+ * Doing a reset via this method is functionally equivalent to calling
+ * usb_reset_device(), except that it is delayed to a workqueue. As
+ * with usb_reset_device(), drivers bound to other interfaces of the
+ * device might be unbound, as well as usbfs users in user space.
+ *
+ * Corner cases:
+ *
+ * - Scheduling two resets at the same time from two different drivers
+ * attached to two different interfaces of the same device is
+ * possible; depending on how the driver attached to each interface
+ * handles ->pre_reset(), the second reset might or might not happen.
+ *
+ * - If a driver is unbound and it had a pending reset, the reset will
+ * be cancelled.
+ *
+ * - This function can be called from .probe() or .disconnect(). On
+ * return from .disconnect(), any pending resets will have been
+ * cancelled.
+ *
+ * There is no need to lock/unlock @reset_ws as schedule_work()
+ * does its own locking.
+ *
+ * NOTE: We don't do any reference count tracking because it is not
+ * needed. The lifecycle of the work_struct is tied to the
+ * usb_interface. Before destroying the interface we cancel the
+ * work_struct, so the fact that the work_struct is queued and/or
+ * running means that the interface (and thus the device) exists and
+ * is referenced.
+ */
+void usb_queue_reset_device(struct usb_interface *iface)
+{
+ schedule_work(&iface->reset_ws);
+}
+EXPORT_SYMBOL_GPL(usb_queue_reset_device);
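As a usage illustration (editorial sketch, not from the patch; the callback and its error handling are hypothetical), a driver that detects a wedged device inside an URB completion handler, where usb_reset_device() cannot be called because it sleeps, would schedule the reset instead:

static void example_int_callback(struct urb *urb)
{
	struct usb_interface *intf = urb->context;

	switch (urb->status) {
	case 0:
		/* ... process the data ... */
		break;
	case -EPROTO:
	case -EILSEQ:
		/* Device looks wedged; we are in atomic context, so
		 * queue the reset instead of blocking on it. */
		usb_queue_reset_device(intf);
		return;
	default:
		return;		/* unlink/shutdown etc.: stop resubmitting */
	}
	if (usb_submit_urb(urb, GFP_ATOMIC))
		dev_err(&intf->dev, "example: resubmit failed\n");
}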
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 6d1048faf08..de51667dd64 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -18,6 +18,8 @@
#include "hcd.h" /* for usbcore internals */
#include "usb.h"
+static void cancel_async_set_config(struct usb_device *udev);
+
struct api_context {
struct completion done;
int status;
@@ -139,9 +141,9 @@ int usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
dr->bRequestType = requesttype;
dr->bRequest = request;
- dr->wValue = cpu_to_le16p(&value);
- dr->wIndex = cpu_to_le16p(&index);
- dr->wLength = cpu_to_le16p(&size);
+ dr->wValue = cpu_to_le16(value);
+ dr->wIndex = cpu_to_le16(index);
+ dr->wLength = cpu_to_le16(size);
/* dbg("usb_control_msg"); */
@@ -1004,6 +1006,34 @@ int usb_clear_halt(struct usb_device *dev, int pipe)
}
EXPORT_SYMBOL_GPL(usb_clear_halt);
+static int create_intf_ep_devs(struct usb_interface *intf)
+{
+ struct usb_device *udev = interface_to_usbdev(intf);
+ struct usb_host_interface *alt = intf->cur_altsetting;
+ int i;
+
+ if (intf->ep_devs_created || intf->unregistering)
+ return 0;
+
+ for (i = 0; i < alt->desc.bNumEndpoints; ++i)
+ (void) usb_create_ep_devs(&intf->dev, &alt->endpoint[i], udev);
+ intf->ep_devs_created = 1;
+ return 0;
+}
+
+static void remove_intf_ep_devs(struct usb_interface *intf)
+{
+ struct usb_host_interface *alt = intf->cur_altsetting;
+ int i;
+
+ if (!intf->ep_devs_created)
+ return;
+
+ for (i = 0; i < alt->desc.bNumEndpoints; ++i)
+ usb_remove_ep_devs(&alt->endpoint[i]);
+ intf->ep_devs_created = 0;
+}
+
/**
* usb_disable_endpoint -- Disable an endpoint by address
* @dev: the device whose endpoint is being disabled
@@ -1092,7 +1122,7 @@ void usb_disable_device(struct usb_device *dev, int skip_ep0)
dev_dbg(&dev->dev, "unregistering interface %s\n",
dev_name(&interface->dev));
interface->unregistering = 1;
- usb_remove_sysfs_intf_files(interface);
+ remove_intf_ep_devs(interface);
device_del(&interface->dev);
}
@@ -1113,22 +1143,26 @@ void usb_disable_device(struct usb_device *dev, int skip_ep0)
* usb_enable_endpoint - Enable an endpoint for USB communications
* @dev: the device whose interface is being enabled
* @ep: the endpoint
+ * @reset_toggle: flag to set the endpoint's toggle back to 0
*
- * Resets the endpoint toggle, and sets dev->ep_{in,out} pointers.
+ * Resets the endpoint toggle if asked, and sets dev->ep_{in,out} pointers.
* For control endpoints, both the input and output sides are handled.
*/
-void usb_enable_endpoint(struct usb_device *dev, struct usb_host_endpoint *ep)
+void usb_enable_endpoint(struct usb_device *dev, struct usb_host_endpoint *ep,
+ bool reset_toggle)
{
int epnum = usb_endpoint_num(&ep->desc);
int is_out = usb_endpoint_dir_out(&ep->desc);
int is_control = usb_endpoint_xfer_control(&ep->desc);
if (is_out || is_control) {
- usb_settoggle(dev, epnum, 1, 0);
+ if (reset_toggle)
+ usb_settoggle(dev, epnum, 1, 0);
dev->ep_out[epnum] = ep;
}
if (!is_out || is_control) {
- usb_settoggle(dev, epnum, 0, 0);
+ if (reset_toggle)
+ usb_settoggle(dev, epnum, 0, 0);
dev->ep_in[epnum] = ep;
}
ep->enabled = 1;
@@ -1138,17 +1172,18 @@ void usb_enable_endpoint(struct usb_device *dev, struct usb_host_endpoint *ep)
* usb_enable_interface - Enable all the endpoints for an interface
* @dev: the device whose interface is being enabled
* @intf: pointer to the interface descriptor
+ * @reset_toggles: flag to set the endpoints' toggles back to 0
*
* Enables all the endpoints for the interface's current altsetting.
*/
-static void usb_enable_interface(struct usb_device *dev,
- struct usb_interface *intf)
+void usb_enable_interface(struct usb_device *dev,
+ struct usb_interface *intf, bool reset_toggles)
{
struct usb_host_interface *alt = intf->cur_altsetting;
int i;
for (i = 0; i < alt->desc.bNumEndpoints; ++i)
- usb_enable_endpoint(dev, &alt->endpoint[i]);
+ usb_enable_endpoint(dev, &alt->endpoint[i], reset_toggles);
}
/**
@@ -1235,8 +1270,10 @@ int usb_set_interface(struct usb_device *dev, int interface, int alternate)
*/
/* prevent submissions using previous endpoint settings */
- if (iface->cur_altsetting != alt)
+ if (iface->cur_altsetting != alt) {
+ remove_intf_ep_devs(iface);
usb_remove_sysfs_intf_files(iface);
+ }
usb_disable_interface(dev, iface);
iface->cur_altsetting = alt;
@@ -1271,10 +1308,11 @@ int usb_set_interface(struct usb_device *dev, int interface, int alternate)
* during the SETUP stage - hence EP0 toggles are "don't care" here.
* (Likewise, EP0 never "halts" on well designed devices.)
*/
- usb_enable_interface(dev, iface);
- if (device_is_registered(&iface->dev))
+ usb_enable_interface(dev, iface, true);
+ if (device_is_registered(&iface->dev)) {
usb_create_sysfs_intf_files(iface);
-
+ create_intf_ep_devs(iface);
+ }
return 0;
}
EXPORT_SYMBOL_GPL(usb_set_interface);
@@ -1334,7 +1372,6 @@ int usb_reset_configuration(struct usb_device *dev)
struct usb_interface *intf = config->interface[i];
struct usb_host_interface *alt;
- usb_remove_sysfs_intf_files(intf);
alt = usb_altnum_to_altsetting(intf, 0);
/* No altsetting 0? We'll assume the first altsetting.
@@ -1345,10 +1382,16 @@ int usb_reset_configuration(struct usb_device *dev)
if (!alt)
alt = &intf->altsetting[0];
+ if (alt != intf->cur_altsetting) {
+ remove_intf_ep_devs(intf);
+ usb_remove_sysfs_intf_files(intf);
+ }
intf->cur_altsetting = alt;
- usb_enable_interface(dev, intf);
- if (device_is_registered(&intf->dev))
+ usb_enable_interface(dev, intf, true);
+ if (device_is_registered(&intf->dev)) {
usb_create_sysfs_intf_files(intf);
+ create_intf_ep_devs(intf);
+ }
}
return 0;
}
@@ -1441,6 +1484,46 @@ static struct usb_interface_assoc_descriptor *find_iad(struct usb_device *dev,
return retval;
}
+
+/*
+ * Internal function to queue a device reset
+ *
+ * This is initialized into the workstruct in 'struct
+ * usb_interface->reset_ws' that is launched by
+ * message.c:usb_set_configuration() when initializing each 'struct
+ * usb_interface'.
+ *
+ * It is safe to get the USB device without reference counts because
+ * the life cycle of @iface is bound to the life cycle of @udev. Then,
+ * this function will be run only if @iface is alive (and before
+ * freeing it any scheduled instances of it will have been cancelled).
+ *
+ * We need to set a flag (iface->reset_running) because when we call
+ * the reset, the interfaces might be unbound. The current interface
+ * cannot try to remove the queued work as it would cause a deadlock
+ * (you cannot remove your work from within your executing
+ * workqueue). This flag lets it know, so that
+ * usb_cancel_queued_reset() doesn't try to do it.
+ *
+ * See usb_queue_reset_device() for more details
+ */
+void __usb_queue_reset_device(struct work_struct *ws)
+{
+ int rc;
+ struct usb_interface *iface =
+ container_of(ws, struct usb_interface, reset_ws);
+ struct usb_device *udev = interface_to_usbdev(iface);
+
+ rc = usb_lock_device_for_reset(udev, iface);
+ if (rc >= 0) {
+ iface->reset_running = 1;
+ usb_reset_device(udev);
+ iface->reset_running = 0;
+ usb_unlock_device(udev);
+ }
+}
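
The public entry point that schedules this work is usb_queue_reset_device(), referenced at the end of the comment above; it is assumed here to take the interface pointer and return nothing. A hedged sketch of a driver using it from an URB completion handler, where a synchronous usb_reset_device() would be unsafe; the callback, context setup and error conditions are illustrative, not part of this patch:

#include <linux/usb.h>

/* Hedged sketch: schedule an asynchronous reset from completion context. */
static void example_int_callback(struct urb *urb)
{
        struct usb_interface *intf = urb->context;      /* assumed setup */

        if (urb->status == -EPROTO || urb->status == -EILSEQ) {
                /* Device appears wedged; let the workqueue reset it. */
                usb_queue_reset_device(intf);
                return;
        }
        usb_submit_urb(urb, GFP_ATOMIC);
}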
+
+
/*
* usb_set_configuration - Makes a particular device setting be current
* @dev: the device whose configuration is being updated
@@ -1560,6 +1643,9 @@ free_interfaces:
if (dev->state != USB_STATE_ADDRESS)
usb_disable_device(dev, 1); /* Skip ep0 */
+ /* Get rid of pending async Set-Config requests for this device */
+ cancel_async_set_config(dev);
+
ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
USB_REQ_SET_CONFIGURATION, 0, configuration, 0,
NULL, 0, USB_CTRL_SET_TIMEOUT);
@@ -1604,13 +1690,14 @@ free_interfaces:
alt = &intf->altsetting[0];
intf->cur_altsetting = alt;
- usb_enable_interface(dev, intf);
+ usb_enable_interface(dev, intf, true);
intf->dev.parent = &dev->dev;
intf->dev.driver = NULL;
intf->dev.bus = &usb_bus_type;
intf->dev.type = &usb_if_device_type;
intf->dev.groups = usb_interface_groups;
intf->dev.dma_mask = dev->dev.dma_mask;
+ INIT_WORK(&intf->reset_ws, __usb_queue_reset_device);
device_initialize(&intf->dev);
mark_quiesced(intf);
dev_set_name(&intf->dev, "%d-%s:%d.%d",
@@ -1641,17 +1728,21 @@ free_interfaces:
dev_name(&intf->dev), ret);
continue;
}
- usb_create_sysfs_intf_files(intf);
+ create_intf_ep_devs(intf);
}
usb_autosuspend_device(dev);
return 0;
}
+static LIST_HEAD(set_config_list);
+static DEFINE_SPINLOCK(set_config_lock);
+
struct set_config_request {
struct usb_device *udev;
int config;
struct work_struct work;
+ struct list_head node;
};
/* Worker routine for usb_driver_set_configuration() */
@@ -1659,14 +1750,35 @@ static void driver_set_config_work(struct work_struct *work)
{
struct set_config_request *req =
container_of(work, struct set_config_request, work);
+ struct usb_device *udev = req->udev;
- usb_lock_device(req->udev);
- usb_set_configuration(req->udev, req->config);
- usb_unlock_device(req->udev);
- usb_put_dev(req->udev);
+ usb_lock_device(udev);
+ spin_lock(&set_config_lock);
+ list_del(&req->node);
+ spin_unlock(&set_config_lock);
+
+ if (req->config >= -1) /* Is req still valid? */
+ usb_set_configuration(udev, req->config);
+ usb_unlock_device(udev);
+ usb_put_dev(udev);
kfree(req);
}
+/* Cancel pending Set-Config requests for a device whose configuration
+ * was just changed
+ */
+static void cancel_async_set_config(struct usb_device *udev)
+{
+ struct set_config_request *req;
+
+ spin_lock(&set_config_lock);
+ list_for_each_entry(req, &set_config_list, node) {
+ if (req->udev == udev)
+ req->config = -999; /* Mark as cancelled */
+ }
+ spin_unlock(&set_config_lock);
+}
+
/**
* usb_driver_set_configuration - Provide a way for drivers to change device configurations
* @udev: the device whose configuration is being updated
@@ -1698,6 +1810,10 @@ int usb_driver_set_configuration(struct usb_device *udev, int config)
req->config = config;
INIT_WORK(&req->work, driver_set_config_work);
+ spin_lock(&set_config_lock);
+ list_add(&req->node, &set_config_list);
+ spin_unlock(&set_config_lock);
+
usb_get_dev(udev);
schedule_work(&req->work);
return 0;
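
usb_driver_set_configuration() remains the way for a driver to request a configuration change while the device lock is held (for example from probe()), now with the cancellation list above keeping stale requests from firing. A hedged sketch of that call pattern; the target configuration value and the driver function names are assumptions for illustration:

#include <linux/usb.h>

/* Hedged sketch: ask usbcore to switch configurations asynchronously. */
static int example_probe(struct usb_interface *intf,
                         const struct usb_device_id *id)
{
        struct usb_device *udev = interface_to_usbdev(intf);

        if (!udev->actconfig ||
            udev->actconfig->desc.bConfigurationValue != 2) {
                int rc = usb_driver_set_configuration(udev, 2);

                /* Decline this interface; we will be re-probed in config 2. */
                return rc ? rc : -ENODEV;
        }
        return 0;
}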
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
index 4fb65fdc9dc..4cc2456ef3b 100644
--- a/drivers/usb/core/sysfs.c
+++ b/drivers/usb/core/sysfs.c
@@ -359,19 +359,19 @@ set_level(struct device *dev, struct device_attribute *attr,
strncmp(buf, on_string, len) == 0) {
udev->autosuspend_disabled = 1;
udev->autoresume_disabled = 0;
- rc = usb_external_resume_device(udev);
+ rc = usb_external_resume_device(udev, PMSG_USER_RESUME);
} else if (len == sizeof auto_string - 1 &&
strncmp(buf, auto_string, len) == 0) {
udev->autosuspend_disabled = 0;
udev->autoresume_disabled = 0;
- rc = usb_external_resume_device(udev);
+ rc = usb_external_resume_device(udev, PMSG_USER_RESUME);
} else if (len == sizeof suspend_string - 1 &&
strncmp(buf, suspend_string, len) == 0) {
udev->autosuspend_disabled = 0;
udev->autoresume_disabled = 1;
- rc = usb_external_suspend_device(udev, PMSG_SUSPEND);
+ rc = usb_external_suspend_device(udev, PMSG_USER_SUSPEND);
} else
rc = -EINVAL;
@@ -629,9 +629,6 @@ int usb_create_sysfs_dev_files(struct usb_device *udev)
struct device *dev = &udev->dev;
int retval;
- /* Unforunately these attributes cannot be created before
- * the uevent is broadcast.
- */
retval = device_create_bin_file(dev, &dev_bin_attr_descriptors);
if (retval)
goto error;
@@ -643,11 +640,7 @@ int usb_create_sysfs_dev_files(struct usb_device *udev)
retval = add_power_attributes(dev);
if (retval)
goto error;
-
- retval = usb_create_ep_files(dev, &udev->ep0, udev);
- if (retval)
- goto error;
- return 0;
+ return retval;
error:
usb_remove_sysfs_dev_files(udev);
return retval;
@@ -657,7 +650,6 @@ void usb_remove_sysfs_dev_files(struct usb_device *udev)
{
struct device *dev = &udev->dev;
- usb_remove_ep_files(&udev->ep0);
remove_power_attributes(dev);
remove_persist_attributes(dev);
device_remove_bin_file(dev, &dev_bin_attr_descriptors);
@@ -812,28 +804,6 @@ struct attribute_group *usb_interface_groups[] = {
NULL
};
-static inline void usb_create_intf_ep_files(struct usb_interface *intf,
- struct usb_device *udev)
-{
- struct usb_host_interface *iface_desc;
- int i;
-
- iface_desc = intf->cur_altsetting;
- for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i)
- usb_create_ep_files(&intf->dev, &iface_desc->endpoint[i],
- udev);
-}
-
-static inline void usb_remove_intf_ep_files(struct usb_interface *intf)
-{
- struct usb_host_interface *iface_desc;
- int i;
-
- iface_desc = intf->cur_altsetting;
- for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i)
- usb_remove_ep_files(&iface_desc->endpoint[i]);
-}
-
int usb_create_sysfs_intf_files(struct usb_interface *intf)
{
struct usb_device *udev = interface_to_usbdev(intf);
@@ -843,26 +813,19 @@ int usb_create_sysfs_intf_files(struct usb_interface *intf)
if (intf->sysfs_files_created || intf->unregistering)
return 0;
- /* The interface string may be present in some altsettings
- * and missing in others. Hence its attribute cannot be created
- * before the uevent is broadcast.
- */
if (alt->string == NULL)
alt->string = usb_cache_string(udev, alt->desc.iInterface);
if (alt->string)
retval = device_create_file(&intf->dev, &dev_attr_interface);
- usb_create_intf_ep_files(intf, udev);
intf->sysfs_files_created = 1;
return 0;
}
void usb_remove_sysfs_intf_files(struct usb_interface *intf)
{
- struct device *dev = &intf->dev;
-
if (!intf->sysfs_files_created)
return;
- usb_remove_intf_ep_files(intf);
- device_remove_file(dev, &dev_attr_interface);
+
+ device_remove_file(&intf->dev, &dev_attr_interface);
intf->sysfs_files_created = 0;
}
diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
index 1f68af9db3f..58bc5e3c256 100644
--- a/drivers/usb/core/urb.c
+++ b/drivers/usb/core/urb.c
@@ -10,7 +10,6 @@
#define to_urb(d) container_of(d, struct urb, kref)
-static DEFINE_SPINLOCK(usb_reject_lock);
static void urb_destroy(struct kref *kref)
{
@@ -131,9 +130,7 @@ void usb_anchor_urb(struct urb *urb, struct usb_anchor *anchor)
urb->anchor = anchor;
if (unlikely(anchor->poisoned)) {
- spin_lock(&usb_reject_lock);
- urb->reject++;
- spin_unlock(&usb_reject_lock);
+ atomic_inc(&urb->reject);
}
spin_unlock_irqrestore(&anchor->lock, flags);
@@ -565,16 +562,12 @@ void usb_kill_urb(struct urb *urb)
might_sleep();
if (!(urb && urb->dev && urb->ep))
return;
- spin_lock_irq(&usb_reject_lock);
- ++urb->reject;
- spin_unlock_irq(&usb_reject_lock);
+ atomic_inc(&urb->reject);
usb_hcd_unlink_urb(urb, -ENOENT);
wait_event(usb_kill_urb_queue, atomic_read(&urb->use_count) == 0);
- spin_lock_irq(&usb_reject_lock);
- --urb->reject;
- spin_unlock_irq(&usb_reject_lock);
+ atomic_dec(&urb->reject);
}
EXPORT_SYMBOL_GPL(usb_kill_urb);
@@ -606,9 +599,7 @@ void usb_poison_urb(struct urb *urb)
might_sleep();
if (!(urb && urb->dev && urb->ep))
return;
- spin_lock_irq(&usb_reject_lock);
- ++urb->reject;
- spin_unlock_irq(&usb_reject_lock);
+ atomic_inc(&urb->reject);
usb_hcd_unlink_urb(urb, -ENOENT);
wait_event(usb_kill_urb_queue, atomic_read(&urb->use_count) == 0);
@@ -617,14 +608,10 @@ EXPORT_SYMBOL_GPL(usb_poison_urb);
void usb_unpoison_urb(struct urb *urb)
{
- unsigned long flags;
-
if (!urb)
return;
- spin_lock_irqsave(&usb_reject_lock, flags);
- --urb->reject;
- spin_unlock_irqrestore(&usb_reject_lock, flags);
+ atomic_dec(&urb->reject);
}
EXPORT_SYMBOL_GPL(usb_unpoison_urb);
@@ -691,6 +678,26 @@ void usb_poison_anchored_urbs(struct usb_anchor *anchor)
}
EXPORT_SYMBOL_GPL(usb_poison_anchored_urbs);
+/**
+ * usb_unpoison_anchored_urbs - let an anchor be used successfully again
+ * @anchor: anchor the requests are bound to
+ *
+ * Reverses the effect of usb_poison_anchored_urbs();
+ * the anchor can be used normally again after it returns.
+ */
+void usb_unpoison_anchored_urbs(struct usb_anchor *anchor)
+{
+ unsigned long flags;
+ struct urb *lazarus;
+
+ spin_lock_irqsave(&anchor->lock, flags);
+ list_for_each_entry(lazarus, &anchor->urb_list, anchor_list) {
+ usb_unpoison_urb(lazarus);
+ }
+ anchor->poisoned = 0;
+ spin_unlock_irqrestore(&anchor->lock, flags);
+}
+EXPORT_SYMBOL_GPL(usb_unpoison_anchored_urbs);
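
The new export pairs with the existing usb_poison_anchored_urbs() across events such as a device reset. A hedged sketch, assuming a driver that keeps its in-flight URBs on one anchor; the private structure and callback names are illustrative only:

#include <linux/usb.h>

struct example_priv {
        struct usb_anchor submitted;    /* init_usb_anchor() in probe() */
};

/* Quiesce traffic before the reset... */
static int example_pre_reset(struct usb_interface *intf)
{
        struct example_priv *priv = usb_get_intfdata(intf);

        usb_poison_anchored_urbs(&priv->submitted);
        return 0;
}

/* ...and let the anchor accept submissions again afterwards. */
static int example_post_reset(struct usb_interface *intf)
{
        struct example_priv *priv = usb_get_intfdata(intf);

        usb_unpoison_anchored_urbs(&priv->submitted);
        return 0;
}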
/**
* usb_unlink_anchored_urbs - asynchronously cancel transfer requests en masse
* @anchor: anchor the requests are bound to
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 399e15fc505..dcfc072630c 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -253,7 +253,7 @@ static int usb_dev_prepare(struct device *dev)
static void usb_dev_complete(struct device *dev)
{
/* Currently used only for rebinding interfaces */
- usb_resume(dev); /* Implement eventually? */
+ usb_resume(dev, PMSG_RESUME); /* Message event is meaningless */
}
static int usb_dev_suspend(struct device *dev)
@@ -263,7 +263,7 @@ static int usb_dev_suspend(struct device *dev)
static int usb_dev_resume(struct device *dev)
{
- return usb_resume(dev);
+ return usb_resume(dev, PMSG_RESUME);
}
static int usb_dev_freeze(struct device *dev)
@@ -273,7 +273,7 @@ static int usb_dev_freeze(struct device *dev)
static int usb_dev_thaw(struct device *dev)
{
- return usb_resume(dev);
+ return usb_resume(dev, PMSG_THAW);
}
static int usb_dev_poweroff(struct device *dev)
@@ -283,7 +283,7 @@ static int usb_dev_poweroff(struct device *dev)
static int usb_dev_restore(struct device *dev)
{
- return usb_resume(dev);
+ return usb_resume(dev, PMSG_RESTORE);
}
static struct dev_pm_ops usb_device_pm_ops = {
@@ -362,7 +362,7 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
dev->ep0.desc.bLength = USB_DT_ENDPOINT_SIZE;
dev->ep0.desc.bDescriptorType = USB_DT_ENDPOINT;
/* ep0 maxpacket comes later, from device descriptor */
- usb_enable_endpoint(dev, &dev->ep0);
+ usb_enable_endpoint(dev, &dev->ep0, true);
dev->can_submit = 1;
/* Save readable and stable topology id, distinguishing devices
@@ -402,6 +402,7 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
#ifdef CONFIG_PM
mutex_init(&dev->pm_mutex);
INIT_DELAYED_WORK(&dev->autosuspend, usb_autosuspend_work);
+ INIT_WORK(&dev->autoresume, usb_autoresume_work);
dev->autosuspend_delay = usb_autosuspend_delay * HZ;
dev->connect_time = jiffies;
dev->active_duration = -jiffies;
@@ -513,10 +514,7 @@ EXPORT_SYMBOL_GPL(usb_put_intf);
* disconnect; in some drivers (such as usb-storage) the disconnect()
* or suspend() method will block waiting for a device reset to complete.
*
- * Returns a negative error code for failure, otherwise 1 or 0 to indicate
- * that the device will or will not have to be unlocked. (0 can be
- * returned when an interface is given and is BINDING, because in that
- * case the driver already owns the device lock.)
+ * Returns a negative error code for failure, otherwise 0.
*/
int usb_lock_device_for_reset(struct usb_device *udev,
const struct usb_interface *iface)
@@ -527,16 +525,9 @@ int usb_lock_device_for_reset(struct usb_device *udev,
return -ENODEV;
if (udev->state == USB_STATE_SUSPENDED)
return -EHOSTUNREACH;
- if (iface) {
- switch (iface->condition) {
- case USB_INTERFACE_BINDING:
- return 0;
- case USB_INTERFACE_BOUND:
- break;
- default:
- return -EINTR;
- }
- }
+ if (iface && (iface->condition == USB_INTERFACE_UNBINDING ||
+ iface->condition == USB_INTERFACE_UNBOUND))
+ return -EINTR;
while (usb_trylock_device(udev) != 0) {
@@ -550,10 +541,11 @@ int usb_lock_device_for_reset(struct usb_device *udev,
return -ENODEV;
if (udev->state == USB_STATE_SUSPENDED)
return -EHOSTUNREACH;
- if (iface && iface->condition != USB_INTERFACE_BOUND)
+ if (iface && (iface->condition == USB_INTERFACE_UNBINDING ||
+ iface->condition == USB_INTERFACE_UNBOUND))
return -EINTR;
}
- return 1;
+ return 0;
}
EXPORT_SYMBOL_GPL(usb_lock_device_for_reset);
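
The return convention changes here from 1/0/negative to 0/negative, so on success the caller always owns the unlock. A brief sketch of the resulting pattern; the surrounding driver function is hypothetical:

#include <linux/usb.h>

/* Hedged sketch: synchronous reset under the new 0/-errno convention. */
static int example_trigger_reset(struct usb_interface *intf)
{
        struct usb_device *udev = interface_to_usbdev(intf);
        int rc;

        rc = usb_lock_device_for_reset(udev, intf);
        if (rc < 0)
                return rc;      /* gone, suspended, or interface unbinding */

        rc = usb_reset_device(udev);
        usb_unlock_device(udev);        /* the lock is always ours to drop */
        return rc;
}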
@@ -962,8 +954,12 @@ void usb_buffer_unmap_sg(const struct usb_device *dev, int is_in,
}
EXPORT_SYMBOL_GPL(usb_buffer_unmap_sg);
-/* format to disable USB on kernel command line is: nousb */
-__module_param_call("", nousb, param_set_bool, param_get_bool, &nousb, 0444);
+/* To disable USB, kernel command line is 'nousb' not 'usbcore.nousb' */
+#ifdef MODULE
+module_param(nousb, bool, 0444);
+#else
+core_param(nousb, nousb, bool, 0444);
+#endif
/*
* for external read access to
@@ -974,6 +970,37 @@ int usb_disabled(void)
}
EXPORT_SYMBOL_GPL(usb_disabled);
+/*
+ * Notifications of device and interface registration
+ */
+static int usb_bus_notify(struct notifier_block *nb, unsigned long action,
+ void *data)
+{
+ struct device *dev = data;
+
+ switch (action) {
+ case BUS_NOTIFY_ADD_DEVICE:
+ if (dev->type == &usb_device_type)
+ (void) usb_create_sysfs_dev_files(to_usb_device(dev));
+ else if (dev->type == &usb_if_device_type)
+ (void) usb_create_sysfs_intf_files(
+ to_usb_interface(dev));
+ break;
+
+ case BUS_NOTIFY_DEL_DEVICE:
+ if (dev->type == &usb_device_type)
+ usb_remove_sysfs_dev_files(to_usb_device(dev));
+ else if (dev->type == &usb_if_device_type)
+ usb_remove_sysfs_intf_files(to_usb_interface(dev));
+ break;
+ }
+ return 0;
+}
+
+static struct notifier_block usb_bus_nb = {
+ .notifier_call = usb_bus_notify,
+};
+
/*
* Init
*/
@@ -991,6 +1018,9 @@ static int __init usb_init(void)
retval = bus_register(&usb_bus_type);
if (retval)
goto bus_register_failed;
+ retval = bus_register_notifier(&usb_bus_type, &usb_bus_nb);
+ if (retval)
+ goto bus_notifier_failed;
retval = usb_host_init();
if (retval)
goto host_init_failed;
@@ -1025,6 +1055,8 @@ driver_register_failed:
major_init_failed:
usb_host_cleanup();
host_init_failed:
+ bus_unregister_notifier(&usb_bus_type, &usb_bus_nb);
+bus_notifier_failed:
bus_unregister(&usb_bus_type);
bus_register_failed:
ksuspend_usb_cleanup();
@@ -1048,6 +1080,7 @@ static void __exit usb_exit(void)
usb_devio_cleanup();
usb_hub_cleanup();
usb_host_cleanup();
+ bus_unregister_notifier(&usb_bus_type, &usb_bus_nb);
bus_unregister(&usb_bus_type);
ksuspend_usb_cleanup();
}
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
index 9a1a45ac3ad..386177867a8 100644
--- a/drivers/usb/core/usb.h
+++ b/drivers/usb/core/usb.h
@@ -1,16 +1,20 @@
+#include <linux/pm.h>
+
/* Functions local to drivers/usb/core/ */
extern int usb_create_sysfs_dev_files(struct usb_device *dev);
extern void usb_remove_sysfs_dev_files(struct usb_device *dev);
extern int usb_create_sysfs_intf_files(struct usb_interface *intf);
extern void usb_remove_sysfs_intf_files(struct usb_interface *intf);
-extern int usb_create_ep_files(struct device *parent,
+extern int usb_create_ep_devs(struct device *parent,
struct usb_host_endpoint *endpoint,
struct usb_device *udev);
-extern void usb_remove_ep_files(struct usb_host_endpoint *endpoint);
+extern void usb_remove_ep_devs(struct usb_host_endpoint *endpoint);
extern void usb_enable_endpoint(struct usb_device *dev,
- struct usb_host_endpoint *ep);
+ struct usb_host_endpoint *ep, bool reset_toggle);
+extern void usb_enable_interface(struct usb_device *dev,
+ struct usb_interface *intf, bool reset_toggles);
extern void usb_disable_endpoint(struct usb_device *dev, unsigned int epaddr);
extern void usb_disable_interface(struct usb_device *dev,
struct usb_interface *intf);
@@ -42,14 +46,16 @@ extern void usb_host_cleanup(void);
#ifdef CONFIG_PM
extern int usb_suspend(struct device *dev, pm_message_t msg);
-extern int usb_resume(struct device *dev);
+extern int usb_resume(struct device *dev, pm_message_t msg);
extern void usb_autosuspend_work(struct work_struct *work);
-extern int usb_port_suspend(struct usb_device *dev);
-extern int usb_port_resume(struct usb_device *dev);
+extern void usb_autoresume_work(struct work_struct *work);
+extern int usb_port_suspend(struct usb_device *dev, pm_message_t msg);
+extern int usb_port_resume(struct usb_device *dev, pm_message_t msg);
extern int usb_external_suspend_device(struct usb_device *udev,
pm_message_t msg);
-extern int usb_external_resume_device(struct usb_device *udev);
+extern int usb_external_resume_device(struct usb_device *udev,
+ pm_message_t msg);
static inline void usb_pm_lock(struct usb_device *udev)
{
@@ -63,12 +69,12 @@ static inline void usb_pm_unlock(struct usb_device *udev)
#else
-static inline int usb_port_suspend(struct usb_device *udev)
+static inline int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
{
return 0;
}
-static inline int usb_port_resume(struct usb_device *udev)
+static inline int usb_port_resume(struct usb_device *udev, pm_message_t msg)
{
return 0;
}
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index dd4cd5a5137..3219d137340 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -297,13 +297,34 @@ config USB_S3C2410_DEBUG
# musb builds in ../musb along with host support
config USB_GADGET_MUSB_HDRC
- boolean "Inventra HDRC USB Peripheral (TI, ...)"
+ boolean "Inventra HDRC USB Peripheral (TI, ADI, ...)"
depends on USB_MUSB_HDRC && (USB_MUSB_PERIPHERAL || USB_MUSB_OTG)
select USB_GADGET_DUALSPEED
select USB_GADGET_SELECTED
help
This OTG-capable silicon IP is used in dual designs including
- the TI DaVinci, OMAP 243x, OMAP 343x, and TUSB 6010.
+ the TI DaVinci, OMAP 243x, OMAP 343x, TUSB 6010, and ADI Blackfin.
+
+config USB_GADGET_IMX
+ boolean "Freescale IMX USB Peripheral Controller"
+ depends on ARCH_MX1
+ help
+ Freescale's IMX series includes an integrated full-speed
+ USB 1.1 device controller.  The controllers across the IMX
+ series are register-compatible.
+
+ It has six fixed-function endpoints, as well as endpoint
+ zero (for control transfers).
+
+ Say "y" to link the driver statically, or "m" to build a
+ dynamically linked module called "imx_udc" and force all
+ gadget drivers to also be dynamically linked.
+
+config USB_IMX
+ tristate
+ depends on USB_GADGET_IMX
+ default USB_GADGET
+ select USB_GADGET_SELECTED
config USB_GADGET_M66592
boolean "Renesas M66592 USB Peripheral Controller"
@@ -377,6 +398,24 @@ config USB_FSL_QE
default USB_GADGET
select USB_GADGET_SELECTED
+config USB_GADGET_CI13XXX
+ boolean "MIPS USB CI13xxx"
+ depends on PCI
+ select USB_GADGET_DUALSPEED
+ help
+ MIPS USB IP core family device controller.
+ Currently it only supports IP part number CI13412.
+
+ Say "y" to link the driver statically, or "m" to build a
+ dynamically linked module called "ci13xxx_udc" and force all
+ gadget drivers to also be dynamically linked.
+
+config USB_CI13XXX
+ tristate
+ depends on USB_GADGET_CI13XXX
+ default USB_GADGET
+ select USB_GADGET_SELECTED
+
config USB_GADGET_NET2280
boolean "NetChip 228x"
depends on PCI
diff --git a/drivers/usb/gadget/Makefile b/drivers/usb/gadget/Makefile
index bd4041b47dc..39a51d746cb 100644
--- a/drivers/usb/gadget/Makefile
+++ b/drivers/usb/gadget/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_USB_NET2280) += net2280.o
obj-$(CONFIG_USB_AMD5536UDC) += amd5536udc.o
obj-$(CONFIG_USB_PXA25X) += pxa25x_udc.o
obj-$(CONFIG_USB_PXA27X) += pxa27x_udc.o
+obj-$(CONFIG_USB_IMX) += imx_udc.o
obj-$(CONFIG_USB_GOKU) += goku_udc.o
obj-$(CONFIG_USB_OMAP) += omap_udc.o
obj-$(CONFIG_USB_LH7A40X) += lh7a40x_udc.o
@@ -19,6 +20,7 @@ obj-$(CONFIG_USB_ATMEL_USBA) += atmel_usba_udc.o
obj-$(CONFIG_USB_FSL_USB2) += fsl_usb2_udc.o
obj-$(CONFIG_USB_M66592) += m66592-udc.o
obj-$(CONFIG_USB_FSL_QE) += fsl_qe_udc.o
+obj-$(CONFIG_USB_CI13XXX) += ci13xxx_udc.o
#
# USB gadget drivers
diff --git a/drivers/usb/gadget/ci13xxx_udc.c b/drivers/usb/gadget/ci13xxx_udc.c
new file mode 100644
index 00000000000..bebf911c7e5
--- /dev/null
+++ b/drivers/usb/gadget/ci13xxx_udc.c
@@ -0,0 +1,2830 @@
+/*
+ * ci13xxx_udc.c - MIPS USB IP core family device controller
+ *
+ * Copyright (C) 2008 Chipidea - MIPS Technologies, Inc. All rights reserved.
+ *
+ * Author: David Lopo
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * Description: MIPS USB IP core family device controller
+ * Currently it only supports IP part number CI13412
+ *
+ * This driver is composed of several blocks:
+ * - HW: hardware interface
+ * - DBG: debug facilities (optional)
+ * - UTIL: utilities
+ * - ISR: interrupts handling
+ * - ENDPT: endpoint operations (Gadget API)
+ * - GADGET: gadget operations (Gadget API)
+ * - BUS: bus glue code, bus abstraction layer
+ * - PCI: PCI core interface and PCI resources (interrupts, memory...)
+ *
+ * Compile Options
+ * - CONFIG_USB_GADGET_DEBUG_FILES: enable debug facilities
+ * - STALL_IN: non-empty bulk-in pipes cannot be halted
+ * if defined mass storage compliance succeeds but with warnings
+ * => case 4: Hi > Dn
+ * => case 5: Hi > Di
+ * => case 8: Hi <> Do
+ * if undefined usbtest 13 fails
+ * - TRACE: enable function tracing (depends on DEBUG)
+ *
+ * Main Features
+ * - Chapter 9 & Mass Storage Compliance with Gadget File Storage
+ * - Chapter 9 Compliance with Gadget Zero (STALL_IN undefined)
+ * - Normal & LPM support
+ *
+ * USBTEST Report
+ * - OK: 0-12, 13 (STALL_IN defined) & 14
+ * - Not Supported: 15 & 16 (ISO)
+ *
+ * TODO List
+ * - OTG
+ * - Isochronous & Interrupt Traffic
+ * - Handle requests which span several TDs
+ * - GET_STATUS(device) - always reports 0
+ * - Gadget API (majority of optional features)
+ * - Suspend & Remote Wakeup
+ */
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dmapool.h>
+#include <linux/dma-mapping.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+
+#include "ci13xxx_udc.h"
+
+
+/******************************************************************************
+ * DEFINE
+ *****************************************************************************/
+/* ctrl register bank access */
+static DEFINE_SPINLOCK(udc_lock);
+
+/* driver name */
+#define UDC_DRIVER_NAME "ci13xxx_udc"
+
+/* control endpoint description */
+static const struct usb_endpoint_descriptor
+ctrl_endpt_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bmAttributes = USB_ENDPOINT_XFER_CONTROL,
+ .wMaxPacketSize = cpu_to_le16(CTRL_PAYLOAD_MAX),
+};
+
+/* UDC descriptor */
+static struct ci13xxx *_udc;
+
+/* Interrupt statistics */
+#define ISR_MASK 0x1F
+static struct {
+ u32 test;
+ u32 ui;
+ u32 uei;
+ u32 pci;
+ u32 uri;
+ u32 sli;
+ u32 none;
+ struct {
+ u32 cnt;
+ u32 buf[ISR_MASK+1];
+ u32 idx;
+ } hndl;
+} isr_statistics;
+
+/**
+ * ffs_nr: find first (least significant) bit set
+ * @x: the word to search
+ *
+ * This function returns the zero-based bit number of the least
+ * significant set bit (32 if @x is zero), unlike ffs() which returns
+ * a 1-based position.
+ */
+static int ffs_nr(u32 x)
+{
+ int n = ffs(x);
+
+ return n ? n-1 : 32;
+}
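
ffs_nr() supplies the shift that pairs with a bitfield mask, which is the pattern the HW block below relies on for field extraction and insertion. A small illustrative sketch (not part of the patch) using the CAP_PORTSC/PORTSC_PTC definitions and the hw_cread()/hw_cwrite() helpers that appear later in this file:

/* Illustrative only -- mirrors what hw_port_test_get()/_set() do below. */
static u8 example_read_test_mode(void)
{
        /* extract: (reg & MASK) >> bit number of the mask's lowest set bit */
        return hw_cread(CAP_PORTSC, PORTSC_PTC) >> ffs_nr(PORTSC_PTC);
}

static void example_write_test_mode(u8 mode)
{
        /* insert: masked write of (value << the same shift) */
        hw_cwrite(CAP_PORTSC, PORTSC_PTC, mode << ffs_nr(PORTSC_PTC));
}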
+
+/******************************************************************************
+ * HW block
+ *****************************************************************************/
+/* register bank descriptor */
+static struct {
+ unsigned lpm; /* is LPM? */
+ void __iomem *abs; /* bus map offset */
+ void __iomem *cap; /* bus map offset + CAP offset + CAP data */
+ size_t size; /* bank size */
+} hw_bank;
+
+/* UDC register map */
+#define ABS_CAPLENGTH (0x100UL)
+#define ABS_HCCPARAMS (0x108UL)
+#define ABS_DCCPARAMS (0x124UL)
+#define ABS_TESTMODE (hw_bank.lpm ? 0x0FCUL : 0x138UL)
+/* offset to CAPLENGTH (addr + data) */
+#define CAP_USBCMD (0x000UL)
+#define CAP_USBSTS (0x004UL)
+#define CAP_USBINTR (0x008UL)
+#define CAP_DEVICEADDR (0x014UL)
+#define CAP_ENDPTLISTADDR (0x018UL)
+#define CAP_PORTSC (0x044UL)
+#define CAP_DEVLC (0x0B4UL)
+#define CAP_USBMODE (hw_bank.lpm ? 0x0C8UL : 0x068UL)
+#define CAP_ENDPTSETUPSTAT (hw_bank.lpm ? 0x0D8UL : 0x06CUL)
+#define CAP_ENDPTPRIME (hw_bank.lpm ? 0x0DCUL : 0x070UL)
+#define CAP_ENDPTFLUSH (hw_bank.lpm ? 0x0E0UL : 0x074UL)
+#define CAP_ENDPTSTAT (hw_bank.lpm ? 0x0E4UL : 0x078UL)
+#define CAP_ENDPTCOMPLETE (hw_bank.lpm ? 0x0E8UL : 0x07CUL)
+#define CAP_ENDPTCTRL (hw_bank.lpm ? 0x0ECUL : 0x080UL)
+#define CAP_LAST (hw_bank.lpm ? 0x12CUL : 0x0C0UL)
+
+/* maximum number of endpoints: valid only after hw_device_reset() */
+static unsigned hw_ep_max;
+
+/**
+ * hw_ep_bit: calculates the bit number
+ * @num: endpoint number
+ * @dir: endpoint direction
+ *
+ * This function returns bit number
+ */
+static inline int hw_ep_bit(int num, int dir)
+{
+ return num + (dir ? 16 : 0);
+}
+
+/**
+ * hw_aread: reads from register bitfield
+ * @addr: address relative to bus map
+ * @mask: bitfield mask
+ *
+ * This function returns register bitfield data
+ */
+static u32 hw_aread(u32 addr, u32 mask)
+{
+ return ioread32(addr + hw_bank.abs) & mask;
+}
+
+/**
+ * hw_awrite: writes to register bitfield
+ * @addr: address relative to bus map
+ * @mask: bitfield mask
+ * @data: new data
+ */
+static void hw_awrite(u32 addr, u32 mask, u32 data)
+{
+ iowrite32(hw_aread(addr, ~mask) | (data & mask),
+ addr + hw_bank.abs);
+}
+
+/**
+ * hw_cread: reads from register bitfield
+ * @addr: address relative to CAP offset plus content
+ * @mask: bitfield mask
+ *
+ * This function returns register bitfield data
+ */
+static u32 hw_cread(u32 addr, u32 mask)
+{
+ return ioread32(addr + hw_bank.cap) & mask;
+}
+
+/**
+ * hw_cwrite: writes to register bitfield
+ * @addr: address relative to CAP offset plus content
+ * @mask: bitfield mask
+ * @data: new data
+ */
+static void hw_cwrite(u32 addr, u32 mask, u32 data)
+{
+ iowrite32(hw_cread(addr, ~mask) | (data & mask),
+ addr + hw_bank.cap);
+}
+
+/**
+ * hw_ctest_and_clear: tests & clears register bitfield
+ * @addr: address relative to CAP offset plus content
+ * @mask: bitfield mask
+ *
+ * This function returns register bitfield data
+ */
+static u32 hw_ctest_and_clear(u32 addr, u32 mask)
+{
+ u32 reg = hw_cread(addr, mask);
+
+ iowrite32(reg, addr + hw_bank.cap);
+ return reg;
+}
+
+/**
+ * hw_ctest_and_write: tests & writes register bitfield
+ * @addr: address relative to CAP offset plus content
+ * @mask: bitfield mask
+ * @data: new data
+ *
+ * This function returns register bitfield data
+ */
+static u32 hw_ctest_and_write(u32 addr, u32 mask, u32 data)
+{
+ u32 reg = hw_cread(addr, ~0);
+
+ iowrite32((reg & ~mask) | (data & mask), addr + hw_bank.cap);
+ return (reg & mask) >> ffs_nr(mask);
+}
+
+/**
+ * hw_device_reset: resets chip (execute without interruption)
+ * @base: register base address
+ *
+ * This function returns an error code
+ */
+static int hw_device_reset(void __iomem *base)
+{
+ u32 reg;
+
+ /* bank is a module variable */
+ hw_bank.abs = base;
+
+ hw_bank.cap = hw_bank.abs;
+ hw_bank.cap += ABS_CAPLENGTH;
+ hw_bank.cap += ioread8(hw_bank.cap);
+
+ reg = hw_aread(ABS_HCCPARAMS, HCCPARAMS_LEN) >> ffs_nr(HCCPARAMS_LEN);
+ hw_bank.lpm = reg;
+ hw_bank.size = hw_bank.cap - hw_bank.abs;
+ hw_bank.size += CAP_LAST;
+ hw_bank.size /= sizeof(u32);
+
+ /* should flush & stop before reset */
+ hw_cwrite(CAP_ENDPTFLUSH, ~0, ~0);
+ hw_cwrite(CAP_USBCMD, USBCMD_RS, 0);
+
+ hw_cwrite(CAP_USBCMD, USBCMD_RST, USBCMD_RST);
+ while (hw_cread(CAP_USBCMD, USBCMD_RST))
+ udelay(10); /* not RTOS friendly */
+
+ /* USBMODE should be configured step by step */
+ hw_cwrite(CAP_USBMODE, USBMODE_CM, USBMODE_CM_IDLE);
+ hw_cwrite(CAP_USBMODE, USBMODE_CM, USBMODE_CM_DEVICE);
+ hw_cwrite(CAP_USBMODE, USBMODE_SLOM, USBMODE_SLOM); /* HW >= 2.3 */
+
+ if (hw_cread(CAP_USBMODE, USBMODE_CM) != USBMODE_CM_DEVICE) {
+ pr_err("cannot enter in device mode");
+ pr_err("lpm = %i", hw_bank.lpm);
+ return -ENODEV;
+ }
+
+ reg = hw_aread(ABS_DCCPARAMS, DCCPARAMS_DEN) >> ffs_nr(DCCPARAMS_DEN);
+ if (reg == 0 || reg > ENDPT_MAX)
+ return -ENODEV;
+
+ hw_ep_max = reg; /* cache hw ENDPT_MAX */
+
+ /* setup lock mode ? */
+
+ /* ENDPTSETUPSTAT is '0' by default */
+
+ /* HCSPARAMS.bf.ppc SHOULD BE zero for device */
+
+ return 0;
+}
+
+/**
+ * hw_device_state: enables/disables interrupts & starts/stops device (execute
+ * without interruption)
+ * @dma: 0 => disable, !0 => enable and set dma engine
+ *
+ * This function returns an error code
+ */
+static int hw_device_state(u32 dma)
+{
+ if (dma) {
+ hw_cwrite(CAP_ENDPTLISTADDR, ~0, dma);
+ /* interrupt, error, port change, reset, sleep/suspend */
+ hw_cwrite(CAP_USBINTR, ~0,
+ USBi_UI|USBi_UEI|USBi_PCI|USBi_URI|USBi_SLI);
+ hw_cwrite(CAP_USBCMD, USBCMD_RS, USBCMD_RS);
+ } else {
+ hw_cwrite(CAP_USBCMD, USBCMD_RS, 0);
+ hw_cwrite(CAP_USBINTR, ~0, 0);
+ }
+ return 0;
+}
+
+/**
+ * hw_ep_flush: flush endpoint fifo (execute without interruption)
+ * @num: endpoint number
+ * @dir: endpoint direction
+ *
+ * This function returns an error code
+ */
+static int hw_ep_flush(int num, int dir)
+{
+ int n = hw_ep_bit(num, dir);
+
+ do {
+ /* flush any pending transfer */
+ hw_cwrite(CAP_ENDPTFLUSH, BIT(n), BIT(n));
+ while (hw_cread(CAP_ENDPTFLUSH, BIT(n)))
+ cpu_relax();
+ } while (hw_cread(CAP_ENDPTSTAT, BIT(n)));
+
+ return 0;
+}
+
+/**
+ * hw_ep_disable: disables endpoint (execute without interruption)
+ * @num: endpoint number
+ * @dir: endpoint direction
+ *
+ * This function returns an error code
+ */
+static int hw_ep_disable(int num, int dir)
+{
+ hw_ep_flush(num, dir);
+ hw_cwrite(CAP_ENDPTCTRL + num * sizeof(u32),
+ dir ? ENDPTCTRL_TXE : ENDPTCTRL_RXE, 0);
+ return 0;
+}
+
+/**
+ * hw_ep_enable: enables endpoint (execute without interruption)
+ * @num: endpoint number
+ * @dir: endpoint direction
+ * @type: endpoint type
+ *
+ * This function returns an error code
+ */
+static int hw_ep_enable(int num, int dir, int type)
+{
+ u32 mask, data;
+
+ if (dir) {
+ mask = ENDPTCTRL_TXT; /* type */
+ data = type << ffs_nr(mask);
+
+ mask |= ENDPTCTRL_TXS; /* unstall */
+ mask |= ENDPTCTRL_TXR; /* reset data toggle */
+ data |= ENDPTCTRL_TXR;
+ mask |= ENDPTCTRL_TXE; /* enable */
+ data |= ENDPTCTRL_TXE;
+ } else {
+ mask = ENDPTCTRL_RXT; /* type */
+ data = type << ffs_nr(mask);
+
+ mask |= ENDPTCTRL_RXS; /* unstall */
+ mask |= ENDPTCTRL_RXR; /* reset data toggle */
+ data |= ENDPTCTRL_RXR;
+ mask |= ENDPTCTRL_RXE; /* enable */
+ data |= ENDPTCTRL_RXE;
+ }
+ hw_cwrite(CAP_ENDPTCTRL + num * sizeof(u32), mask, data);
+ return 0;
+}
+
+/**
+ * hw_ep_get_halt: return endpoint halt status
+ * @num: endpoint number
+ * @dir: endpoint direction
+ *
+ * This function returns 1 if endpoint halted
+ */
+static int hw_ep_get_halt(int num, int dir)
+{
+ u32 mask = dir ? ENDPTCTRL_TXS : ENDPTCTRL_RXS;
+
+ return hw_cread(CAP_ENDPTCTRL + num * sizeof(u32), mask) ? 1 : 0;
+}
+
+/**
+ * hw_ep_is_primed: test if endpoint is primed (execute without interruption)
+ * @num: endpoint number
+ * @dir: endpoint direction
+ *
+ * This function returns true if endpoint primed
+ */
+static int hw_ep_is_primed(int num, int dir)
+{
+ u32 reg = hw_cread(CAP_ENDPTPRIME, ~0) | hw_cread(CAP_ENDPTSTAT, ~0);
+
+ return test_bit(hw_ep_bit(num, dir), (void *)&reg);
+}
+
+/**
+ * hw_test_and_clear_setup_status: test & clear setup status (execute without
+ * interruption)
+ * @n: bit number (endpoint)
+ *
+ * This function returns setup status
+ */
+static int hw_test_and_clear_setup_status(int n)
+{
+ return hw_ctest_and_clear(CAP_ENDPTSETUPSTAT, BIT(n));
+}
+
+/**
+ * hw_ep_prime: primes endpoint (execute without interruption)
+ * @num: endpoint number
+ * @dir: endpoint direction
+ * @is_ctrl: true if control endpoint
+ *
+ * This function returns an error code
+ */
+static int hw_ep_prime(int num, int dir, int is_ctrl)
+{
+ int n = hw_ep_bit(num, dir);
+
+ /* the caller should flush first */
+ if (hw_ep_is_primed(num, dir))
+ return -EBUSY;
+
+ if (is_ctrl && dir == RX && hw_cread(CAP_ENDPTSETUPSTAT, BIT(num)))
+ return -EAGAIN;
+
+ hw_cwrite(CAP_ENDPTPRIME, BIT(n), BIT(n));
+
+ while (hw_cread(CAP_ENDPTPRIME, BIT(n)))
+ cpu_relax();
+ if (is_ctrl && dir == RX && hw_cread(CAP_ENDPTSETUPSTAT, BIT(num)))
+ return -EAGAIN;
+
+ /* status should be tested according to the manual, but it doesn't work */
+ return 0;
+}
+
+/**
+ * hw_ep_set_halt: configures ep halt & resets data toggle after clear (execute
+ * without interruption)
+ * @num: endpoint number
+ * @dir: endpoint direction
+ * @value: true => stall, false => unstall
+ *
+ * This function returns an error code
+ */
+static int hw_ep_set_halt(int num, int dir, int value)
+{
+ if (value != 0 && value != 1)
+ return -EINVAL;
+
+ do {
+ u32 addr = CAP_ENDPTCTRL + num * sizeof(u32);
+ u32 mask_xs = dir ? ENDPTCTRL_TXS : ENDPTCTRL_RXS;
+ u32 mask_xr = dir ? ENDPTCTRL_TXR : ENDPTCTRL_RXR;
+
+ /* data toggle - reserved for EP0 but it's in ESS */
+ hw_cwrite(addr, mask_xs|mask_xr, value ? mask_xs : mask_xr);
+
+ } while (value != hw_ep_get_halt(num, dir));
+
+ return 0;
+}
+
+/**
+ * hw_intr_clear: disables interrupt & clears interrupt status (execute without
+ * interruption)
+ * @n: interrupt bit
+ *
+ * This function returns an error code
+ */
+static int hw_intr_clear(int n)
+{
+ if (n >= REG_BITS)
+ return -EINVAL;
+
+ hw_cwrite(CAP_USBINTR, BIT(n), 0);
+ hw_cwrite(CAP_USBSTS, BIT(n), BIT(n));
+ return 0;
+}
+
+/**
+ * hw_intr_force: enables interrupt & forces interrupt status (execute without
+ * interruption)
+ * @n: interrupt bit
+ *
+ * This function returns an error code
+ */
+static int hw_intr_force(int n)
+{
+ if (n >= REG_BITS)
+ return -EINVAL;
+
+ hw_awrite(ABS_TESTMODE, TESTMODE_FORCE, TESTMODE_FORCE);
+ hw_cwrite(CAP_USBINTR, BIT(n), BIT(n));
+ hw_cwrite(CAP_USBSTS, BIT(n), BIT(n));
+ hw_awrite(ABS_TESTMODE, TESTMODE_FORCE, 0);
+ return 0;
+}
+
+/**
+ * hw_port_is_high_speed: test if port is high speed
+ *
+ * This function returns true if high speed port
+ */
+static int hw_port_is_high_speed(void)
+{
+ return hw_bank.lpm ? hw_cread(CAP_DEVLC, DEVLC_PSPD) :
+ hw_cread(CAP_PORTSC, PORTSC_HSP);
+}
+
+/**
+ * hw_port_test_get: reads port test mode value
+ *
+ * This function returns port test mode value
+ */
+static u8 hw_port_test_get(void)
+{
+ return hw_cread(CAP_PORTSC, PORTSC_PTC) >> ffs_nr(PORTSC_PTC);
+}
+
+/**
+ * hw_port_test_set: writes port test mode (execute without interruption)
+ * @mode: new value
+ *
+ * This function returns an error code
+ */
+static int hw_port_test_set(u8 mode)
+{
+ const u8 TEST_MODE_MAX = 7;
+
+ if (mode > TEST_MODE_MAX)
+ return -EINVAL;
+
+ hw_cwrite(CAP_PORTSC, PORTSC_PTC, mode << ffs_nr(PORTSC_PTC));
+ return 0;
+}
+
+/**
+ * hw_read_intr_enable: returns interrupt enable register
+ *
+ * This function returns register data
+ */
+static u32 hw_read_intr_enable(void)
+{
+ return hw_cread(CAP_USBINTR, ~0);
+}
+
+/**
+ * hw_read_intr_status: returns interrupt status register
+ *
+ * This function returns register data
+ */
+static u32 hw_read_intr_status(void)
+{
+ return hw_cread(CAP_USBSTS, ~0);
+}
+
+/**
+ * hw_register_read: reads all device registers (execute without interruption)
+ * @buf: destination buffer
+ * @size: buffer size
+ *
+ * This function returns number of registers read
+ */
+static size_t hw_register_read(u32 *buf, size_t size)
+{
+ unsigned i;
+
+ if (size > hw_bank.size)
+ size = hw_bank.size;
+
+ for (i = 0; i < size; i++)
+ buf[i] = hw_aread(i * sizeof(u32), ~0);
+
+ return size;
+}
+
+/**
+ * hw_register_write: writes to register
+ * @addr: register address
+ * @data: register value
+ *
+ * This function returns an error code
+ */
+static int hw_register_write(u16 addr, u32 data)
+{
+ /* align */
+ addr /= sizeof(u32);
+
+ if (addr >= hw_bank.size)
+ return -EINVAL;
+
+ /* align */
+ addr *= sizeof(u32);
+
+ hw_awrite(addr, ~0, data);
+ return 0;
+}
+
+/**
+ * hw_test_and_clear_complete: test & clear complete status (execute without
+ * interruption)
+ * @n: bit number (endpoint)
+ *
+ * This function returns complete status
+ */
+static int hw_test_and_clear_complete(int n)
+{
+ return hw_ctest_and_clear(CAP_ENDPTCOMPLETE, BIT(n));
+}
+
+/**
+ * hw_test_and_clear_intr_active: test & clear active interrupts (execute
+ * without interruption)
+ *
+ * This function returns the active interrupts
+ */
+static u32 hw_test_and_clear_intr_active(void)
+{
+ u32 reg = hw_read_intr_status() & hw_read_intr_enable();
+
+ hw_cwrite(CAP_USBSTS, ~0, reg);
+ return reg;
+}
+
+/**
+ * hw_test_and_clear_setup_guard: test & clear setup guard (execute without
+ * interruption)
+ *
+ * This function returns guard value
+ */
+static int hw_test_and_clear_setup_guard(void)
+{
+ return hw_ctest_and_write(CAP_USBCMD, USBCMD_SUTW, 0);
+}
+
+/**
+ * hw_test_and_set_setup_guard: test & set setup guard (execute without
+ * interruption)
+ *
+ * This function returns guard value
+ */
+static int hw_test_and_set_setup_guard(void)
+{
+ return hw_ctest_and_write(CAP_USBCMD, USBCMD_SUTW, USBCMD_SUTW);
+}
+
+/**
+ * hw_usb_set_address: configures USB address (execute without interruption)
+ * @value: new USB address
+ *
+ * This function returns an error code
+ */
+static int hw_usb_set_address(u8 value)
+{
+ /* advance */
+ hw_cwrite(CAP_DEVICEADDR, DEVICEADDR_USBADR | DEVICEADDR_USBADRA,
+ value << ffs_nr(DEVICEADDR_USBADR) | DEVICEADDR_USBADRA);
+ return 0;
+}
+
+/**
+ * hw_usb_reset: restart device after a bus reset (execute without
+ * interruption)
+ *
+ * This function returns an error code
+ */
+static int hw_usb_reset(void)
+{
+ hw_usb_set_address(0);
+
+ /* ESS flushes only at end?!? */
+ hw_cwrite(CAP_ENDPTFLUSH, ~0, ~0); /* flush all EPs */
+
+ /* clear setup token semaphores */
+ hw_cwrite(CAP_ENDPTSETUPSTAT, 0, 0); /* writes its content */
+
+ /* clear complete status */
+ hw_cwrite(CAP_ENDPTCOMPLETE, 0, 0); /* writes its content */
+
+ /* wait until all bits cleared */
+ while (hw_cread(CAP_ENDPTPRIME, ~0))
+ udelay(10); /* not RTOS friendly */
+
+ /* reset all endpoints ? */
+
+ /* reset internal status and wait for further instructions
+ no need to verify the port reset status (ESS does it) */
+
+ return 0;
+}
+
+/******************************************************************************
+ * DBG block
+ *****************************************************************************/
+/**
+ * show_device: prints information about device capabilities and status
+ *
+ * Check "device.h" for details
+ */
+static ssize_t show_device(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
+ struct usb_gadget *gadget = &udc->gadget;
+ int n = 0;
+
+ dbg_trace("[%s] %p\n", __func__, buf);
+ if (attr == NULL || buf == NULL) {
+ dev_err(dev, "[%s] EINVAL\n", __func__);
+ return 0;
+ }
+
+ n += scnprintf(buf + n, PAGE_SIZE - n, "speed = %d\n",
+ gadget->speed);
+ n += scnprintf(buf + n, PAGE_SIZE - n, "is_dualspeed = %d\n",
+ gadget->is_dualspeed);
+ n += scnprintf(buf + n, PAGE_SIZE - n, "is_otg = %d\n",
+ gadget->is_otg);
+ n += scnprintf(buf + n, PAGE_SIZE - n, "is_a_peripheral = %d\n",
+ gadget->is_a_peripheral);
+ n += scnprintf(buf + n, PAGE_SIZE - n, "b_hnp_enable = %d\n",
+ gadget->b_hnp_enable);
+ n += scnprintf(buf + n, PAGE_SIZE - n, "a_hnp_support = %d\n",
+ gadget->a_hnp_support);
+ n += scnprintf(buf + n, PAGE_SIZE - n, "a_alt_hnp_support = %d\n",
+ gadget->a_alt_hnp_support);
+ n += scnprintf(buf + n, PAGE_SIZE - n, "name = %s\n",
+ (gadget->name ? gadget->name : ""));
+
+ return n;
+}
+static DEVICE_ATTR(device, S_IRUSR, show_device, NULL);
+
+/**
+ * show_driver: prints information about attached gadget (if any)
+ *
+ * Check "device.h" for details
+ */
+static ssize_t show_driver(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
+ struct usb_gadget_driver *driver = udc->driver;
+ int n = 0;
+
+ dbg_trace("[%s] %p\n", __func__, buf);
+ if (attr == NULL || buf == NULL) {
+ dev_err(dev, "[%s] EINVAL\n", __func__);
+ return 0;
+ }
+
+ if (driver == NULL)
+ return scnprintf(buf, PAGE_SIZE,
+ "There is no gadget attached!\n");
+
+ n += scnprintf(buf + n, PAGE_SIZE - n, "function = %s\n",
+ (driver->function ? driver->function : ""));
+ n += scnprintf(buf + n, PAGE_SIZE - n, "max speed = %d\n",
+ driver->speed);
+
+ return n;
+}
+static DEVICE_ATTR(driver, S_IRUSR, show_driver, NULL);
+
+/* Maximum event message length */
+#define DBG_DATA_MSG 64UL
+
+/* Maximum event messages */
+#define DBG_DATA_MAX 128UL
+
+/* Event buffer descriptor */
+static struct {
+ char (buf[DBG_DATA_MAX])[DBG_DATA_MSG]; /* buffer */
+ unsigned idx; /* index */
+ unsigned tty; /* print to console? */
+ rwlock_t lck; /* lock */
+} dbg_data = {
+ .idx = 0,
+ .tty = 0,
+ .lck = __RW_LOCK_UNLOCKED(lck)
+};
+
+/**
+ * dbg_dec: decrements debug event index
+ * @idx: buffer index
+ */
+static void dbg_dec(unsigned *idx)
+{
+ *idx = (*idx - 1) & (DBG_DATA_MAX-1);
+}
+
+/**
+ * dbg_inc: increments debug event index
+ * @idx: buffer index
+ */
+static void dbg_inc(unsigned *idx)
+{
+ *idx = (*idx + 1) & (DBG_DATA_MAX-1);
+}
+
+/**
+ * dbg_print: prints the common part of the event
+ * @addr: endpoint address
+ * @name: event name
+ * @status: status
+ * @extra: extra information
+ */
+static void dbg_print(u8 addr, const char *name, int status, const char *extra)
+{
+ struct timeval tval;
+ unsigned int stamp;
+ unsigned long flags;
+
+ write_lock_irqsave(&dbg_data.lck, flags);
+
+ do_gettimeofday(&tval);
+ stamp = tval.tv_sec & 0xFFFF; /* 2^32 = 4294967296. Limit to 4096s */
+ stamp = stamp * 1000000 + tval.tv_usec;
+
+ scnprintf(dbg_data.buf[dbg_data.idx], DBG_DATA_MSG,
+ "%04X\t» %02X %-7.7s %4i «\t%s\n",
+ stamp, addr, name, status, extra);
+
+ dbg_inc(&dbg_data.idx);
+
+ write_unlock_irqrestore(&dbg_data.lck, flags);
+
+ if (dbg_data.tty != 0)
+ pr_notice("%04X\t» %02X %-7.7s %4i «\t%s\n",
+ stamp, addr, name, status, extra);
+}
+
+/**
+ * dbg_done: prints a DONE event
+ * @addr: endpoint address
+ * @token: transfer descriptor token
+ * @status: status
+ */
+static void dbg_done(u8 addr, const u32 token, int status)
+{
+ char msg[DBG_DATA_MSG];
+
+ scnprintf(msg, sizeof(msg), "%d %02X",
+ (int)(token & TD_TOTAL_BYTES) >> ffs_nr(TD_TOTAL_BYTES),
+ (int)(token & TD_STATUS) >> ffs_nr(TD_STATUS));
+ dbg_print(addr, "DONE", status, msg);
+}
+
+/**
+ * dbg_event: prints a generic event
+ * @addr: endpoint address
+ * @name: event name
+ * @status: status
+ */
+static void dbg_event(u8 addr, const char *name, int status)
+{
+ if (name != NULL)
+ dbg_print(addr, name, status, "");
+}
+
+/**
+ * dbg_queue: prints a QUEUE event
+ * @addr: endpoint address
+ * @req: USB request
+ * @status: status
+ */
+static void dbg_queue(u8 addr, const struct usb_request *req, int status)
+{
+ char msg[DBG_DATA_MSG];
+
+ if (req != NULL) {
+ scnprintf(msg, sizeof(msg),
+ "%d %d", !req->no_interrupt, req->length);
+ dbg_print(addr, "QUEUE", status, msg);
+ }
+}
+
+/**
+ * dbg_setup: prints a SETUP event
+ * @addr: endpoint address
+ * @req: setup request
+ */
+static void dbg_setup(u8 addr, const struct usb_ctrlrequest *req)
+{
+ char msg[DBG_DATA_MSG];
+
+ if (req != NULL) {
+ scnprintf(msg, sizeof(msg),
+ "%02X %02X %04X %04X %d", req->bRequestType,
+ req->bRequest, le16_to_cpu(req->wValue),
+ le16_to_cpu(req->wIndex), le16_to_cpu(req->wLength));
+ dbg_print(addr, "SETUP", 0, msg);
+ }
+}
+
+/**
+ * show_events: displays the event buffer
+ *
+ * Check "device.h" for details
+ */
+static ssize_t show_events(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long flags;
+ unsigned i, j, n = 0;
+
+ dbg_trace("[%s] %p\n", __func__, buf);
+ if (attr == NULL || buf == NULL) {
+ dev_err(dev, "[%s] EINVAL\n", __func__);
+ return 0;
+ }
+
+ read_lock_irqsave(&dbg_data.lck, flags);
+
+ i = dbg_data.idx;
+ for (dbg_dec(&i); i != dbg_data.idx; dbg_dec(&i)) {
+ n += strlen(dbg_data.buf[i]);
+ if (n >= PAGE_SIZE) {
+ n -= strlen(dbg_data.buf[i]);
+ break;
+ }
+ }
+ for (j = 0, dbg_inc(&i); j < n; dbg_inc(&i))
+ j += scnprintf(buf + j, PAGE_SIZE - j,
+ "%s", dbg_data.buf[i]);
+
+ read_unlock_irqrestore(&dbg_data.lck, flags);
+
+ return n;
+}
+
+/**
+ * store_events: configure if events are going to be also printed to console
+ *
+ * Check "device.h" for details
+ */
+static ssize_t store_events(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned tty;
+
+ dbg_trace("[%s] %p, %d\n", __func__, buf, count);
+ if (attr == NULL || buf == NULL) {
+ dev_err(dev, "[%s] EINVAL\n", __func__);
+ goto done;
+ }
+
+ if (sscanf(buf, "%u", &tty) != 1 || tty > 1) {
+ dev_err(dev, "<1|0>: enable|disable console log\n");
+ goto done;
+ }
+
+ dbg_data.tty = tty;
+ dev_info(dev, "tty = %u", dbg_data.tty);
+
+ done:
+ return count;
+}
+static DEVICE_ATTR(events, S_IRUSR | S_IWUSR, show_events, store_events);
+
+/**
+ * show_inters: interrupt status, enable status and historic
+ *
+ * Check "device.h" for details
+ */
+static ssize_t show_inters(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
+ unsigned long flags;
+ u32 intr;
+ unsigned i, j, n = 0;
+
+ dbg_trace("[%s] %p\n", __func__, buf);
+ if (attr == NULL || buf == NULL) {
+ dev_err(dev, "[%s] EINVAL\n", __func__);
+ return 0;
+ }
+
+ spin_lock_irqsave(udc->lock, flags);
+
+ n += scnprintf(buf + n, PAGE_SIZE - n,
+ "status = %08x\n", hw_read_intr_status());
+ n += scnprintf(buf + n, PAGE_SIZE - n,
+ "enable = %08x\n", hw_read_intr_enable());
+
+ n += scnprintf(buf + n, PAGE_SIZE - n, "*test = %d\n",
+ isr_statistics.test);
+ n += scnprintf(buf + n, PAGE_SIZE - n, "» ui = %d\n",
+ isr_statistics.ui);
+ n += scnprintf(buf + n, PAGE_SIZE - n, "» uei = %d\n",
+ isr_statistics.uei);
+ n += scnprintf(buf + n, PAGE_SIZE - n, "» pci = %d\n",
+ isr_statistics.pci);
+ n += scnprintf(buf + n, PAGE_SIZE - n, "» uri = %d\n",
+ isr_statistics.uri);
+ n += scnprintf(buf + n, PAGE_SIZE - n, "» sli = %d\n",
+ isr_statistics.sli);
+ n += scnprintf(buf + n, PAGE_SIZE - n, "*none = %d\n",
+ isr_statistics.none);
+ n += scnprintf(buf + n, PAGE_SIZE - n, "*hndl = %d\n",
+ isr_statistics.hndl.cnt);
+
+ for (i = isr_statistics.hndl.idx, j = 0; j <= ISR_MASK; j++, i++) {
+ i &= ISR_MASK;
+ intr = isr_statistics.hndl.buf[i];
+
+ if (USBi_UI & intr)
+ n += scnprintf(buf + n, PAGE_SIZE - n, "ui ");
+ intr &= ~USBi_UI;
+ if (USBi_UEI & intr)
+ n += scnprintf(buf + n, PAGE_SIZE - n, "uei ");
+ intr &= ~USBi_UEI;
+ if (USBi_PCI & intr)
+ n += scnprintf(buf + n, PAGE_SIZE - n, "pci ");
+ intr &= ~USBi_PCI;
+ if (USBi_URI & intr)
+ n += scnprintf(buf + n, PAGE_SIZE - n, "uri ");
+ intr &= ~USBi_URI;
+ if (USBi_SLI & intr)
+ n += scnprintf(buf + n, PAGE_SIZE - n, "sli ");
+ intr &= ~USBi_SLI;
+ if (intr)
+ n += scnprintf(buf + n, PAGE_SIZE - n, "??? ");
+ if (isr_statistics.hndl.buf[i])
+ n += scnprintf(buf + n, PAGE_SIZE - n, "\n");
+ }
+
+ spin_unlock_irqrestore(udc->lock, flags);
+
+ return n;
+}
+
+/**
+ * store_inters: enable & force or disable an individual interrutps
+ * (to be used for test purposes only)
+ *
+ * Check "device.h" for details
+ */
+static ssize_t store_inters(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
+ unsigned long flags;
+ unsigned en, bit;
+
+ dbg_trace("[%s] %p, %d\n", __func__, buf, count);
+ if (attr == NULL || buf == NULL) {
+ dev_err(dev, "[%s] EINVAL\n", __func__);
+ goto done;
+ }
+
+ if (sscanf(buf, "%u %u", &en, &bit) != 2 || en > 1) {
+ dev_err(dev, "<1|0> : enable|disable interrupt");
+ goto done;
+ }
+
+ spin_lock_irqsave(udc->lock, flags);
+ if (en) {
+ if (hw_intr_force(bit))
+ dev_err(dev, "invalid bit number\n");
+ else
+ isr_statistics.test++;
+ } else {
+ if (hw_intr_clear(bit))
+ dev_err(dev, "invalid bit number\n");
+ }
+ spin_unlock_irqrestore(udc->lock, flags);
+
+ done:
+ return count;
+}
+static DEVICE_ATTR(inters, S_IRUSR | S_IWUSR, show_inters, store_inters);
+
+/**
+ * show_port_test: reads port test mode
+ *
+ * Check "device.h" for details
+ */
+static ssize_t show_port_test(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
+ unsigned long flags;
+ unsigned mode;
+
+ dbg_trace("[%s] %p\n", __func__, buf);
+ if (attr == NULL || buf == NULL) {
+ dev_err(dev, "[%s] EINVAL\n", __func__);
+ return 0;
+ }
+
+ spin_lock_irqsave(udc->lock, flags);
+ mode = hw_port_test_get();
+ spin_unlock_irqrestore(udc->lock, flags);
+
+ return scnprintf(buf, PAGE_SIZE, "mode = %u\n", mode);
+}
+
+/**
+ * store_port_test: writes port test mode
+ *
+ * Check "device.h" for details
+ */
+static ssize_t store_port_test(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
+ unsigned long flags;
+ unsigned mode;
+
+ dbg_trace("[%s] %p, %d\n", __func__, buf, count);
+ if (attr == NULL || buf == NULL) {
+ dev_err(dev, "[%s] EINVAL\n", __func__);
+ goto done;
+ }
+
+ if (sscanf(buf, "%u", &mode) != 1) {
+ dev_err(dev, ": set port test mode");
+ goto done;
+ }
+
+ spin_lock_irqsave(udc->lock, flags);
+ if (hw_port_test_set(mode))
+ dev_err(dev, "invalid mode\n");
+ spin_unlock_irqrestore(udc->lock, flags);
+
+ done:
+ return count;
+}
+static DEVICE_ATTR(port_test, S_IRUSR | S_IWUSR,
+ show_port_test, store_port_test);
+
+/**
+ * show_qheads: DMA contents of all queue heads
+ *
+ * Check "device.h" for details
+ */
+static ssize_t show_qheads(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
+ unsigned long flags;
+ unsigned i, j, n = 0;
+
+ dbg_trace("[%s] %p\n", __func__, buf);
+ if (attr == NULL || buf == NULL) {
+ dev_err(dev, "[%s] EINVAL\n", __func__);
+ return 0;
+ }
+
+ spin_lock_irqsave(udc->lock, flags);
+ for (i = 0; i < hw_ep_max; i++) {
+ struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[i];
+ n += scnprintf(buf + n, PAGE_SIZE - n,
+ "EP=%02i: RX=%08X TX=%08X\n",
+ i, (u32)mEp->qh[RX].dma, (u32)mEp->qh[TX].dma);
+ for (j = 0; j < (sizeof(struct ci13xxx_qh)/sizeof(u32)); j++) {
+ n += scnprintf(buf + n, PAGE_SIZE - n,
+ " %04X: %08X %08X\n", j,
+ *((u32 *)mEp->qh[RX].ptr + j),
+ *((u32 *)mEp->qh[TX].ptr + j));
+ }
+ }
+ spin_unlock_irqrestore(udc->lock, flags);
+
+ return n;
+}
+static DEVICE_ATTR(qheads, S_IRUSR, show_qheads, NULL);
+
+/**
+ * show_registers: dumps all registers
+ *
+ * Check "device.h" for details
+ */
+static ssize_t show_registers(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
+ unsigned long flags;
+ u32 dump[512];
+ unsigned i, k, n = 0;
+
+ dbg_trace("[%s] %p\n", __func__, buf);
+ if (attr == NULL || buf == NULL) {
+ dev_err(dev, "[%s] EINVAL\n", __func__);
+ return 0;
+ }
+
+ spin_lock_irqsave(udc->lock, flags);
+ k = hw_register_read(dump, sizeof(dump)/sizeof(u32));
+ spin_unlock_irqrestore(udc->lock, flags);
+
+ for (i = 0; i < k; i++) {
+ n += scnprintf(buf + n, PAGE_SIZE - n,
+ "reg[0x%04X] = 0x%08X\n",
+ i * (unsigned)sizeof(u32), dump[i]);
+ }
+
+ return n;
+}
+
+/**
+ * store_registers: writes value to register address
+ *
+ * Check "device.h" for details
+ */
+static ssize_t store_registers(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
+ unsigned long addr, data, flags;
+
+ dbg_trace("[%s] %p, %d\n", __func__, buf, count);
+ if (attr == NULL || buf == NULL) {
+ dev_err(dev, "[%s] EINVAL\n", __func__);
+ goto done;
+ }
+
+ if (sscanf(buf, "%li %li", &addr, &data) != 2) {
+ dev_err(dev, " : write data to register address");
+ goto done;
+ }
+
+ spin_lock_irqsave(udc->lock, flags);
+ if (hw_register_write(addr, data))
+ dev_err(dev, "invalid address range\n");
+ spin_unlock_irqrestore(udc->lock, flags);
+
+ done:
+ return count;
+}
+static DEVICE_ATTR(registers, S_IRUSR | S_IWUSR,
+ show_registers, store_registers);
+
+/**
+ * show_requests: DMA contents of all requests currently queued (all endpts)
+ *
+ * Check "device.h" for details
+ */
+static ssize_t show_requests(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
+ unsigned long flags;
+ struct list_head *ptr = NULL;
+ struct ci13xxx_req *req = NULL;
+ unsigned i, j, k, n = 0, qSize = sizeof(struct ci13xxx_td)/sizeof(u32);
+
+ dbg_trace("[%s] %p\n", __func__, buf);
+ if (attr == NULL || buf == NULL) {
+ dev_err(dev, "[%s] EINVAL\n", __func__);
+ return 0;
+ }
+
+ spin_lock_irqsave(udc->lock, flags);
+ for (i = 0; i < hw_ep_max; i++)
+ for (k = RX; k <= TX; k++)
+ list_for_each(ptr, &udc->ci13xxx_ep[i].qh[k].queue)
+ {
+ req = list_entry(ptr,
+ struct ci13xxx_req, queue);
+
+ n += scnprintf(buf + n, PAGE_SIZE - n,
+ "EP=%02i: TD=%08X %s\n",
+ i, (u32)req->dma,
+ ((k == RX) ? "RX" : "TX"));
+
+ for (j = 0; j < qSize; j++)
+ n += scnprintf(buf + n, PAGE_SIZE - n,
+ " %04X: %08X\n", j,
+ *((u32 *)req->ptr + j));
+ }
+ spin_unlock_irqrestore(udc->lock, flags);
+
+ return n;
+}
+static DEVICE_ATTR(requests, S_IRUSR, show_requests, NULL);
+
+/**
+ * dbg_create_files: initializes the attribute interface
+ * @dev: device
+ *
+ * This function returns an error code
+ */
+__maybe_unused static int dbg_create_files(struct device *dev)
+{
+ int retval = 0;
+
+ if (dev == NULL)
+ return -EINVAL;
+ retval = device_create_file(dev, &dev_attr_device);
+ if (retval)
+ goto done;
+ retval = device_create_file(dev, &dev_attr_driver);
+ if (retval)
+ goto rm_device;
+ retval = device_create_file(dev, &dev_attr_events);
+ if (retval)
+ goto rm_driver;
+ retval = device_create_file(dev, &dev_attr_inters);
+ if (retval)
+ goto rm_events;
+ retval = device_create_file(dev, &dev_attr_port_test);
+ if (retval)
+ goto rm_inters;
+ retval = device_create_file(dev, &dev_attr_qheads);
+ if (retval)
+ goto rm_port_test;
+ retval = device_create_file(dev, &dev_attr_registers);
+ if (retval)
+ goto rm_qheads;
+ retval = device_create_file(dev, &dev_attr_requests);
+ if (retval)
+ goto rm_registers;
+ return 0;
+
+ rm_registers:
+ device_remove_file(dev, &dev_attr_registers);
+ rm_qheads:
+ device_remove_file(dev, &dev_attr_qheads);
+ rm_port_test:
+ device_remove_file(dev, &dev_attr_port_test);
+ rm_inters:
+ device_remove_file(dev, &dev_attr_inters);
+ rm_events:
+ device_remove_file(dev, &dev_attr_events);
+ rm_driver:
+ device_remove_file(dev, &dev_attr_driver);
+ rm_device:
+ device_remove_file(dev, &dev_attr_device);
+ done:
+ return retval;
+}
+
+/**
+ * dbg_remove_files: destroys the attribute interface
+ * @dev: device
+ *
+ * This function returns an error code
+ */
+__maybe_unused static int dbg_remove_files(struct device *dev)
+{
+ if (dev == NULL)
+ return -EINVAL;
+ device_remove_file(dev, &dev_attr_requests);
+ device_remove_file(dev, &dev_attr_registers);
+ device_remove_file(dev, &dev_attr_qheads);
+ device_remove_file(dev, &dev_attr_port_test);
+ device_remove_file(dev, &dev_attr_inters);
+ device_remove_file(dev, &dev_attr_events);
+ device_remove_file(dev, &dev_attr_driver);
+ device_remove_file(dev, &dev_attr_device);
+ return 0;
+}
+
+/******************************************************************************
+ * UTIL block
+ *****************************************************************************/
+/**
+ * _usb_addr: calculates endpoint address from direction & number
+ * @ep: endpoint
+ */
+static inline u8 _usb_addr(struct ci13xxx_ep *ep)
+{
+ return ((ep->dir == TX) ? USB_ENDPOINT_DIR_MASK : 0) | ep->num;
+}
+
+/**
+ * _hardware_enqueue: configures a request at hardware level
+ * @mEp: endpoint
+ * @mReq: request to be configured
+ *
+ * This function returns an error code
+ */
+static int _hardware_enqueue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
+{
+ unsigned i;
+
+ trace("%p, %p", mEp, mReq);
+
+ /* don't queue twice */
+ if (mReq->req.status == -EALREADY)
+ return -EALREADY;
+
+ if (hw_ep_is_primed(mEp->num, mEp->dir))
+ return -EBUSY;
+
+ mReq->req.status = -EALREADY;
+
+ if (mReq->req.length && !mReq->req.dma) {
+ mReq->req.dma = \
+ dma_map_single(mEp->device, mReq->req.buf,
+ mReq->req.length, mEp->dir ?
+ DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ if (mReq->req.dma == 0)
+ return -ENOMEM;
+
+ mReq->map = 1;
+ }
+
+ /*
+ * TD configuration
+ * TODO - handle requests which span several TDs
+ */
+ memset(mReq->ptr, 0, sizeof(*mReq->ptr));
+ mReq->ptr->next |= TD_TERMINATE;
+ mReq->ptr->token = mReq->req.length << ffs_nr(TD_TOTAL_BYTES);
+ mReq->ptr->token &= TD_TOTAL_BYTES;
+ mReq->ptr->token |= TD_IOC;
+ mReq->ptr->token |= TD_STATUS_ACTIVE;
+ mReq->ptr->page[0] = mReq->req.dma;
+ for (i = 1; i < 5; i++)
+ mReq->ptr->page[i] =
+ (mReq->req.dma + i * PAGE_SIZE) & ~TD_RESERVED_MASK;
+
+ /*
+ * QH configuration
+ * At this point exclusive access to the qhead is guaranteed
+ * (the endpoint is not primed), so there is no need to use the tripwire
+ */
+ mEp->qh[mEp->dir].ptr->td.next = mReq->dma; /* TERMINATE = 0 */
+ mEp->qh[mEp->dir].ptr->td.token &= ~TD_STATUS; /* clear status */
+ if (mReq->req.zero == 0)
+ mEp->qh[mEp->dir].ptr->cap |= QH_ZLT;
+ else
+ mEp->qh[mEp->dir].ptr->cap &= ~QH_ZLT;
+
+ wmb(); /* synchronize before ep prime */
+
+ return hw_ep_prime(mEp->num, mEp->dir,
+ mEp->type == USB_ENDPOINT_XFER_CONTROL);
+}
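
For concreteness, the token assembled above packs the transfer length into
bits 30:16 and ORs in the interrupt-on-completion and active flags. A minimal
sketch (illustrative only; it assumes ffs_nr() returns the bit offset of the
lowest set bit of its mask argument, i.e. 16 for TD_TOTAL_BYTES):

	static inline u32 example_td_token(unsigned length)
	{
		u32 token;

		token  = length << ffs_nr(TD_TOTAL_BYTES);	/* length into bits 30:16 */
		token &= TD_TOTAL_BYTES;			/* clamp to the 15-bit field */
		token |= TD_IOC;				/* interrupt on completion */
		token |= TD_STATUS_ACTIVE;			/* hand the TD to the hardware */
		return token;					/* length 512 -> 0x02008080 */
	}
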
+
+/**
+ * _hardware_dequeue: handles a request at hardware level
+ * @mEp: endpoint
+ * @mReq: request to be dequeued
+ *
+ * This function returns an error code
+ */
+static int _hardware_dequeue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
+{
+ trace("%p, %p", mEp, mReq);
+
+ if (mReq->req.status != -EALREADY)
+ return -EINVAL;
+
+ if (hw_ep_is_primed(mEp->num, mEp->dir))
+ hw_ep_flush(mEp->num, mEp->dir);
+
+ mReq->req.status = 0;
+
+ if (mReq->map) {
+ dma_unmap_single(mEp->device, mReq->req.dma, mReq->req.length,
+ mEp->dir ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ mReq->req.dma = 0;
+ mReq->map = 0;
+ }
+
+ mReq->req.status = mReq->ptr->token & TD_STATUS;
+ if ((TD_STATUS_ACTIVE & mReq->req.status) != 0)
+ mReq->req.status = -ECONNRESET;
+ else if ((TD_STATUS_HALTED & mReq->req.status) != 0)
+ mReq->req.status = -1;
+ else if ((TD_STATUS_DT_ERR & mReq->req.status) != 0)
+ mReq->req.status = -1;
+ else if ((TD_STATUS_TR_ERR & mReq->req.status) != 0)
+ mReq->req.status = -1;
+
+ mReq->req.actual = mReq->ptr->token & TD_TOTAL_BYTES;
+ mReq->req.actual >>= ffs_nr(TD_TOTAL_BYTES);
+ mReq->req.actual = mReq->req.length - mReq->req.actual;
+ mReq->req.actual = mReq->req.status ? 0 : mReq->req.actual;
+
+ return mReq->req.actual;
+}
+
+/**
+ * _ep_nuke: dequeues all endpoint requests
+ * @mEp: endpoint
+ *
+ * This function returns an error code
+ * Caller must hold lock
+ */
+static int _ep_nuke(struct ci13xxx_ep *mEp)
+__releases(mEp->lock)
+__acquires(mEp->lock)
+{
+ trace("%p", mEp);
+
+ if (mEp == NULL)
+ return -EINVAL;
+
+ hw_ep_flush(mEp->num, mEp->dir);
+
+ while (!list_empty(&mEp->qh[mEp->dir].queue)) {
+
+ /* pop oldest request */
+ struct ci13xxx_req *mReq = \
+ list_entry(mEp->qh[mEp->dir].queue.next,
+ struct ci13xxx_req, queue);
+ list_del_init(&mReq->queue);
+ mReq->req.status = -ESHUTDOWN;
+
+ if (!mReq->req.no_interrupt && mReq->req.complete != NULL) {
+ spin_unlock(mEp->lock);
+ mReq->req.complete(&mEp->ep, &mReq->req);
+ spin_lock(mEp->lock);
+ }
+ }
+ return 0;
+}
+
+/**
+ * _gadget_stop_activity: stops all USB activity, flushes & disables all endpts
+ * @gadget: gadget
+ *
+ * This function returns an error code
+ * Caller must hold lock
+ */
+static int _gadget_stop_activity(struct usb_gadget *gadget)
+__releases(udc->lock)
+__acquires(udc->lock)
+{
+ struct usb_ep *ep;
+ struct ci13xxx *udc = container_of(gadget, struct ci13xxx, gadget);
+ struct ci13xxx_ep *mEp = container_of(gadget->ep0,
+ struct ci13xxx_ep, ep);
+
+ trace("%p", gadget);
+
+ if (gadget == NULL)
+ return -EINVAL;
+
+ spin_unlock(udc->lock);
+
+ /* flush all endpoints */
+ gadget_for_each_ep(ep, gadget) {
+ usb_ep_fifo_flush(ep);
+ }
+ usb_ep_fifo_flush(gadget->ep0);
+
+ udc->driver->disconnect(gadget);
+
+ /* make sure to disable all endpoints */
+ gadget_for_each_ep(ep, gadget) {
+ usb_ep_disable(ep);
+ }
+ usb_ep_disable(gadget->ep0);
+
+ if (mEp->status != NULL) {
+ usb_ep_free_request(gadget->ep0, mEp->status);
+ mEp->status = NULL;
+ }
+
+ spin_lock(udc->lock);
+
+ return 0;
+}
+
+/******************************************************************************
+ * ISR block
+ *****************************************************************************/
+/**
+ * isr_reset_handler: USB reset interrupt handler
+ * @udc: UDC device
+ *
+ * This function resets USB engine after a bus reset occurred
+ */
+static void isr_reset_handler(struct ci13xxx *udc)
+__releases(udc->lock)
+__acquires(udc->lock)
+{
+ struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[0];
+ int retval;
+
+ trace("%p", udc);
+
+ if (udc == NULL) {
+ err("EINVAL");
+ return;
+ }
+
+ dbg_event(0xFF, "BUS RST", 0);
+
+ retval = _gadget_stop_activity(&udc->gadget);
+ if (retval)
+ goto done;
+
+ retval = hw_usb_reset();
+ if (retval)
+ goto done;
+
+ spin_unlock(udc->lock);
+ retval = usb_ep_enable(&mEp->ep, &ctrl_endpt_desc);
+ if (!retval) {
+ mEp->status = usb_ep_alloc_request(&mEp->ep, GFP_KERNEL);
+ if (mEp->status == NULL) {
+ usb_ep_disable(&mEp->ep);
+ retval = -ENOMEM;
+ }
+ }
+ spin_lock(udc->lock);
+
+ done:
+ if (retval)
+ err("error: %i", retval);
+}
+
+/**
+ * isr_get_status_complete: get_status request complete function
+ * @ep: endpoint
+ * @req: request handled
+ *
+ * Caller must release lock
+ */
+static void isr_get_status_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ trace("%p, %p", ep, req);
+
+ if (ep == NULL || req == NULL) {
+ err("EINVAL");
+ return;
+ }
+
+ kfree(req->buf);
+ usb_ep_free_request(ep, req);
+}
+
+/**
+ * isr_get_status_response: get_status request response
+ * @mEp: endpoint
+ * @setup: setup request packet
+ *
+ * This function returns an error code
+ */
+static int isr_get_status_response(struct ci13xxx_ep *mEp,
+ struct usb_ctrlrequest *setup)
+__releases(mEp->lock)
+__acquires(mEp->lock)
+{
+ struct usb_request *req = NULL;
+ gfp_t gfp_flags = GFP_ATOMIC;
+ int dir, num, retval;
+
+ trace("%p, %p", mEp, setup);
+
+ if (mEp == NULL || setup == NULL)
+ return -EINVAL;
+
+ spin_unlock(mEp->lock);
+ req = usb_ep_alloc_request(&mEp->ep, gfp_flags);
+ spin_lock(mEp->lock);
+ if (req == NULL)
+ return -ENOMEM;
+
+ req->complete = isr_get_status_complete;
+ req->length = 2;
+ req->buf = kzalloc(req->length, gfp_flags);
+ if (req->buf == NULL) {
+ retval = -ENOMEM;
+ goto err_free_req;
+ }
+
+ if ((setup->bRequestType & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
+ /* TODO: D1 - Remote Wakeup; D0 - Self Powered */
+ retval = 0;
+ } else if ((setup->bRequestType & USB_RECIP_MASK) \
+ == USB_RECIP_ENDPOINT) {
+ dir = (le16_to_cpu(setup->wIndex) & USB_ENDPOINT_DIR_MASK) ?
+ TX : RX;
+ num = le16_to_cpu(setup->wIndex) & USB_ENDPOINT_NUMBER_MASK;
+ *((u16 *)req->buf) = hw_ep_get_halt(num, dir);
+ }
+ /* else do nothing; reserved for future use */
+
+ spin_unlock(mEp->lock);
+ retval = usb_ep_queue(&mEp->ep, req, gfp_flags);
+ spin_lock(mEp->lock);
+ if (retval)
+ goto err_free_buf;
+
+ return 0;
+
+ err_free_buf:
+ kfree(req->buf);
+ err_free_req:
+ spin_unlock(mEp->lock);
+ usb_ep_free_request(&mEp->ep, req);
+ spin_lock(mEp->lock);
+ return retval;
+}
+
+/**
+ * isr_setup_status_phase: queues the status phase of a setup transaction
+ * @mEp: endpoint
+ *
+ * This function returns an error code
+ */
+static int isr_setup_status_phase(struct ci13xxx_ep *mEp)
+__releases(mEp->lock)
+__acquires(mEp->lock)
+{
+ int retval;
+
+ trace("%p", mEp);
+
+ /* mEp is always valid & configured */
+
+ if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
+ mEp->dir = (mEp->dir == TX) ? RX : TX;
+
+ mEp->status->no_interrupt = 1;
+
+ spin_unlock(mEp->lock);
+ retval = usb_ep_queue(&mEp->ep, mEp->status, GFP_ATOMIC);
+ spin_lock(mEp->lock);
+
+ return retval;
+}
+
+/**
+ * isr_tr_complete_low: transaction complete low level handler
+ * @mEp: endpoint
+ *
+ * This function returns an error code
+ * Caller must hold lock
+ */
+static int isr_tr_complete_low(struct ci13xxx_ep *mEp)
+__releases(mEp->lock)
+__acquires(mEp->lock)
+{
+ struct ci13xxx_req *mReq;
+ int retval;
+
+ trace("%p", mEp);
+
+ if (list_empty(&mEp->qh[mEp->dir].queue))
+ return -EINVAL;
+
+ /* pop oldest request */
+ mReq = list_entry(mEp->qh[mEp->dir].queue.next,
+ struct ci13xxx_req, queue);
+ list_del_init(&mReq->queue);
+
+ retval = _hardware_dequeue(mEp, mReq);
+ if (retval < 0) {
+ dbg_event(_usb_addr(mEp), "DONE", retval);
+ goto done;
+ }
+
+ dbg_done(_usb_addr(mEp), mReq->ptr->token, retval);
+
+ if (!mReq->req.no_interrupt && mReq->req.complete != NULL) {
+ spin_unlock(mEp->lock);
+ mReq->req.complete(&mEp->ep, &mReq->req);
+ spin_lock(mEp->lock);
+ }
+
+ if (!list_empty(&mEp->qh[mEp->dir].queue)) {
+ mReq = list_entry(mEp->qh[mEp->dir].queue.next,
+ struct ci13xxx_req, queue);
+ _hardware_enqueue(mEp, mReq);
+ }
+
+ done:
+ return retval;
+}
+
+/**
+ * isr_tr_complete_handler: transaction complete interrupt handler
+ * @udc: UDC descriptor
+ *
+ * This function handles traffic events
+ */
+static void isr_tr_complete_handler(struct ci13xxx *udc)
+__releases(udc->lock)
+__acquires(udc->lock)
+{
+ unsigned i;
+
+ trace("%p", udc);
+
+ if (udc == NULL) {
+ err("EINVAL");
+ return;
+ }
+
+ for (i = 0; i < hw_ep_max; i++) {
+ struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[i];
+ int type, num, err = -EINVAL;
+ struct usb_ctrlrequest req;
+
+
+ if (mEp->desc == NULL)
+ continue; /* not configured */
+
+ if ((mEp->dir == RX && hw_test_and_clear_complete(i)) ||
+ (mEp->dir == TX && hw_test_and_clear_complete(i + 16))) {
+ err = isr_tr_complete_low(mEp);
+ if (mEp->type == USB_ENDPOINT_XFER_CONTROL) {
+ if (err > 0) /* needs status phase */
+ err = isr_setup_status_phase(mEp);
+ if (err < 0) {
+ dbg_event(_usb_addr(mEp),
+ "ERROR", err);
+ spin_unlock(udc->lock);
+ if (usb_ep_set_halt(&mEp->ep))
+ err("error: ep_set_halt");
+ spin_lock(udc->lock);
+ }
+ }
+ }
+
+ if (mEp->type != USB_ENDPOINT_XFER_CONTROL ||
+ !hw_test_and_clear_setup_status(i))
+ continue;
+
+ if (i != 0) {
+ warn("ctrl traffic received at endpoint");
+ continue;
+ }
+
+ /* read_setup_packet */
+ do {
+ hw_test_and_set_setup_guard();
+ memcpy(&req, &mEp->qh[RX].ptr->setup, sizeof(req));
+ } while (!hw_test_and_clear_setup_guard());
+
+ type = req.bRequestType;
+
+ mEp->dir = (type & USB_DIR_IN) ? TX : RX;
+
+ dbg_setup(_usb_addr(mEp), &req);
+
+ switch (req.bRequest) {
+ case USB_REQ_CLEAR_FEATURE:
+ if (type != (USB_DIR_OUT|USB_RECIP_ENDPOINT) &&
+ le16_to_cpu(req.wValue) != USB_ENDPOINT_HALT)
+ goto delegate;
+ if (req.wLength != 0)
+ break;
+ num = le16_to_cpu(req.wIndex);
+ num &= USB_ENDPOINT_NUMBER_MASK;
+ if (!udc->ci13xxx_ep[num].wedge) {
+ spin_unlock(udc->lock);
+ err = usb_ep_clear_halt(
+ &udc->ci13xxx_ep[num].ep);
+ spin_lock(udc->lock);
+ if (err)
+ break;
+ }
+ err = isr_setup_status_phase(mEp);
+ break;
+ case USB_REQ_GET_STATUS:
+ if (type != (USB_DIR_IN|USB_RECIP_DEVICE) &&
+ type != (USB_DIR_IN|USB_RECIP_ENDPOINT) &&
+ type != (USB_DIR_IN|USB_RECIP_INTERFACE))
+ goto delegate;
+ if (le16_to_cpu(req.wLength) != 2 ||
+ le16_to_cpu(req.wValue) != 0)
+ break;
+ err = isr_get_status_response(mEp, &req);
+ break;
+ case USB_REQ_SET_ADDRESS:
+ if (type != (USB_DIR_OUT|USB_RECIP_DEVICE))
+ goto delegate;
+ if (le16_to_cpu(req.wLength) != 0 ||
+ le16_to_cpu(req.wIndex) != 0)
+ break;
+ err = hw_usb_set_address((u8)le16_to_cpu(req.wValue));
+ if (err)
+ break;
+ err = isr_setup_status_phase(mEp);
+ break;
+ case USB_REQ_SET_FEATURE:
+ if (type != (USB_DIR_OUT|USB_RECIP_ENDPOINT) &&
+ le16_to_cpu(req.wValue) != USB_ENDPOINT_HALT)
+ goto delegate;
+ if (req.wLength != 0)
+ break;
+ num = le16_to_cpu(req.wIndex);
+ num &= USB_ENDPOINT_NUMBER_MASK;
+
+ spin_unlock(udc->lock);
+ err = usb_ep_set_halt(&udc->ci13xxx_ep[num].ep);
+ spin_lock(udc->lock);
+ if (err)
+ break;
+ err = isr_setup_status_phase(mEp);
+ break;
+ default:
+delegate:
+ if (req.wLength == 0) /* no data phase */
+ mEp->dir = TX;
+
+ spin_unlock(udc->lock);
+ err = udc->driver->setup(&udc->gadget, &req);
+ spin_lock(udc->lock);
+ break;
+ }
+
+ if (err < 0) {
+ dbg_event(_usb_addr(mEp), "ERROR", err);
+
+ spin_unlock(udc->lock);
+ if (usb_ep_set_halt(&mEp->ep))
+ err("error: ep_set_halt");
+ spin_lock(udc->lock);
+ }
+ }
+}
+
+/******************************************************************************
+ * ENDPT block
+ *****************************************************************************/
+/**
+ * ep_enable: configure endpoint, making it usable
+ *
+ * Check usb_ep_enable() at "usb_gadget.h" for details
+ */
+static int ep_enable(struct usb_ep *ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
+ int direction, retval = 0;
+ unsigned long flags;
+
+ trace("%p, %p", ep, desc);
+
+ if (ep == NULL || desc == NULL)
+ return -EINVAL;
+
+ spin_lock_irqsave(mEp->lock, flags);
+
+ /* only internal SW should enable ctrl endpts */
+
+ mEp->desc = desc;
+
+ if (!list_empty(&mEp->qh[mEp->dir].queue))
+ warn("enabling a non-empty endpoint!");
+
+ mEp->dir = (desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK) ? TX : RX;
+ mEp->num = desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
+ mEp->type = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
+
+ mEp->ep.maxpacket = __constant_le16_to_cpu(desc->wMaxPacketSize);
+
+ direction = mEp->dir;
+ do {
+ dbg_event(_usb_addr(mEp), "ENABLE", 0);
+
+ if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
+ mEp->qh[mEp->dir].ptr->cap |= QH_IOS;
+ else if (mEp->type == USB_ENDPOINT_XFER_ISOC)
+ mEp->qh[mEp->dir].ptr->cap &= ~QH_MULT;
+ else
+ mEp->qh[mEp->dir].ptr->cap &= ~QH_ZLT;
+
+ mEp->qh[mEp->dir].ptr->cap |=
+ (mEp->ep.maxpacket << ffs_nr(QH_MAX_PKT)) & QH_MAX_PKT;
+ mEp->qh[mEp->dir].ptr->td.next |= TD_TERMINATE; /* needed? */
+
+ retval |= hw_ep_enable(mEp->num, mEp->dir, mEp->type);
+
+ if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
+ mEp->dir = (mEp->dir == TX) ? RX : TX;
+
+ } while (mEp->dir != direction);
+
+ spin_unlock_irqrestore(mEp->lock, flags);
+ return retval;
+}
+
+/**
+ * ep_disable: endpoint is no longer usable
+ *
+ * Check usb_ep_disable() at "usb_gadget.h" for details
+ */
+static int ep_disable(struct usb_ep *ep)
+{
+ struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
+ int direction, retval = 0;
+ unsigned long flags;
+
+ trace("%p", ep);
+
+ if (ep == NULL)
+ return -EINVAL;
+ else if (mEp->desc == NULL)
+ return -EBUSY;
+
+ spin_lock_irqsave(mEp->lock, flags);
+
+ /* only internal SW should disable ctrl endpts */
+
+ direction = mEp->dir;
+ do {
+ dbg_event(_usb_addr(mEp), "DISABLE", 0);
+
+ retval |= _ep_nuke(mEp);
+ retval |= hw_ep_disable(mEp->num, mEp->dir);
+
+ if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
+ mEp->dir = (mEp->dir == TX) ? RX : TX;
+
+ } while (mEp->dir != direction);
+
+ mEp->desc = NULL;
+
+ spin_unlock_irqrestore(mEp->lock, flags);
+ return retval;
+}
+
+/**
+ * ep_alloc_request: allocate a request object to use with this endpoint
+ *
+ * Check usb_ep_alloc_request() at "usb_gadget.h" for details
+ */
+static struct usb_request *ep_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
+{
+ struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
+ struct ci13xxx_req *mReq = NULL;
+ unsigned long flags;
+
+ trace("%p, %i", ep, gfp_flags);
+
+ if (ep == NULL) {
+ err("EINVAL");
+ return NULL;
+ }
+
+ spin_lock_irqsave(mEp->lock, flags);
+
+ mReq = kzalloc(sizeof(struct ci13xxx_req), gfp_flags);
+ if (mReq != NULL) {
+ INIT_LIST_HEAD(&mReq->queue);
+
+ mReq->ptr = dma_pool_alloc(mEp->td_pool, gfp_flags,
+ &mReq->dma);
+ if (mReq->ptr == NULL) {
+ kfree(mReq);
+ mReq = NULL;
+ }
+ }
+
+ dbg_event(_usb_addr(mEp), "ALLOC", mReq == NULL);
+
+ spin_unlock_irqrestore(mEp->lock, flags);
+
+ return (mReq == NULL) ? NULL : &mReq->req;
+}
+
+/**
+ * ep_free_request: frees a request object
+ *
+ * Check usb_ep_free_request() at "usb_gadget.h" for details
+ */
+static void ep_free_request(struct usb_ep *ep, struct usb_request *req)
+{
+ struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
+ struct ci13xxx_req *mReq = container_of(req, struct ci13xxx_req, req);
+ unsigned long flags;
+
+ trace("%p, %p", ep, req);
+
+ if (ep == NULL || req == NULL) {
+ err("EINVAL");
+ return;
+ } else if (!list_empty(&mReq->queue)) {
+ err("EBUSY");
+ return;
+ }
+
+ spin_lock_irqsave(mEp->lock, flags);
+
+ if (mReq->ptr)
+ dma_pool_free(mEp->td_pool, mReq->ptr, mReq->dma);
+ kfree(mReq);
+
+ dbg_event(_usb_addr(mEp), "FREE", 0);
+
+ spin_unlock_irqrestore(mEp->lock, flags);
+}
+
+/**
+ * ep_queue: queues (submits) an I/O request to an endpoint
+ *
+ * Check usb_ep_queue() at "usb_gadget.h" for details
+ */
+static int ep_queue(struct usb_ep *ep, struct usb_request *req,
+ gfp_t __maybe_unused gfp_flags)
+{
+ struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
+ struct ci13xxx_req *mReq = container_of(req, struct ci13xxx_req, req);
+ int retval = 0;
+ unsigned long flags;
+
+ trace("%p, %p, %X", ep, req, gfp_flags);
+
+ if (ep == NULL || req == NULL || mEp->desc == NULL)
+ return -EINVAL;
+
+ spin_lock_irqsave(mEp->lock, flags);
+
+ if (mEp->type == USB_ENDPOINT_XFER_CONTROL &&
+ !list_empty(&mEp->qh[mEp->dir].queue)) {
+ _ep_nuke(mEp);
+ retval = -EOVERFLOW;
+ warn("endpoint ctrl %X nuked", _usb_addr(mEp));
+ }
+
+ /* first nuke then test link, e.g. previous status has not sent */
+ if (!list_empty(&mReq->queue)) {
+ retval = -EBUSY;
+ err("request already in queue");
+ goto done;
+ }
+
+ if (req->length > (4 * PAGE_SIZE)) {
+ req->length = (4 * PAGE_SIZE);
+ retval = -EMSGSIZE;
+ warn("request length truncated");
+ }
+
+ dbg_queue(_usb_addr(mEp), req, retval);
+
+ /* push request */
+ mReq->req.status = -EINPROGRESS;
+ mReq->req.actual = 0;
+ list_add_tail(&mReq->queue, &mEp->qh[mEp->dir].queue);
+
+ retval = _hardware_enqueue(mEp, mReq);
+ if (retval == -EALREADY || retval == -EBUSY) {
+ dbg_event(_usb_addr(mEp), "QUEUE", retval);
+ retval = 0;
+ }
+
+ done:
+ spin_unlock_irqrestore(mEp->lock, flags);
+ return retval;
+}
+
+/**
+ * ep_dequeue: dequeues (cancels, unlinks) an I/O request from an endpoint
+ *
+ * Check usb_ep_dequeue() at "usb_gadget.h" for details
+ */
+static int ep_dequeue(struct usb_ep *ep, struct usb_request *req)
+{
+ struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
+ struct ci13xxx_req *mReq = container_of(req, struct ci13xxx_req, req);
+ unsigned long flags;
+
+ trace("%p, %p", ep, req);
+
+ if (ep == NULL || req == NULL || mEp->desc == NULL ||
+ list_empty(&mReq->queue) || list_empty(&mEp->qh[mEp->dir].queue))
+ return -EINVAL;
+
+ spin_lock_irqsave(mEp->lock, flags);
+
+ dbg_event(_usb_addr(mEp), "DEQUEUE", 0);
+
+ if (mReq->req.status == -EALREADY)
+ _hardware_dequeue(mEp, mReq);
+
+ /* pop request */
+ list_del_init(&mReq->queue);
+ req->status = -ECONNRESET;
+
+ if (!mReq->req.no_interrupt && mReq->req.complete != NULL) {
+ spin_unlock(mEp->lock);
+ mReq->req.complete(&mEp->ep, &mReq->req);
+ spin_lock(mEp->lock);
+ }
+
+ spin_unlock_irqrestore(mEp->lock, flags);
+ return 0;
+}
+
+/**
+ * ep_set_halt: sets the endpoint halt feature
+ *
+ * Check usb_ep_set_halt() at "usb_gadget.h" for details
+ */
+static int ep_set_halt(struct usb_ep *ep, int value)
+{
+ struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
+ int direction, retval = 0;
+ unsigned long flags;
+
+ trace("%p, %i", ep, value);
+
+ if (ep == NULL || mEp->desc == NULL)
+ return -EINVAL;
+
+ spin_lock_irqsave(mEp->lock, flags);
+
+#ifndef STALL_IN
+ /* g_file_storage MS compliant but g_zero fails chapter 9 compliance */
+ if (value && mEp->type == USB_ENDPOINT_XFER_BULK && mEp->dir == TX &&
+ !list_empty(&mEp->qh[mEp->dir].queue)) {
+ spin_unlock_irqrestore(mEp->lock, flags);
+ return -EAGAIN;
+ }
+#endif
+
+ direction = mEp->dir;
+ do {
+ dbg_event(_usb_addr(mEp), "HALT", value);
+ retval |= hw_ep_set_halt(mEp->num, mEp->dir, value);
+
+ if (!value)
+ mEp->wedge = 0;
+
+ if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
+ mEp->dir = (mEp->dir == TX) ? RX : TX;
+
+ } while (mEp->dir != direction);
+
+ spin_unlock_irqrestore(mEp->lock, flags);
+ return retval;
+}
+
+/**
+ * ep_set_wedge: sets the halt feature and ignores clear requests
+ *
+ * Check usb_ep_set_wedge() at "usb_gadget.h" for details
+ */
+static int ep_set_wedge(struct usb_ep *ep)
+{
+ struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
+ unsigned long flags;
+
+ trace("%p", ep);
+
+ if (ep == NULL || mEp->desc == NULL)
+ return -EINVAL;
+
+ spin_lock_irqsave(mEp->lock, flags);
+
+ dbg_event(_usb_addr(mEp), "WEDGE", 0);
+ mEp->wedge = 1;
+
+ spin_unlock_irqrestore(mEp->lock, flags);
+
+ return usb_ep_set_halt(ep);
+}
+
+/**
+ * ep_fifo_flush: flushes contents of a fifo
+ *
+ * Check usb_ep_fifo_flush() at "usb_gadget.h" for details
+ */
+static void ep_fifo_flush(struct usb_ep *ep)
+{
+ struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
+ unsigned long flags;
+
+ trace("%p", ep);
+
+ if (ep == NULL) {
+ err("%02X: -EINVAL", _usb_addr(mEp));
+ return;
+ }
+
+ spin_lock_irqsave(mEp->lock, flags);
+
+ dbg_event(_usb_addr(mEp), "FFLUSH", 0);
+ hw_ep_flush(mEp->num, mEp->dir);
+
+ spin_unlock_irqrestore(mEp->lock, flags);
+}
+
+/**
+ * Endpoint-specific part of the API to the USB controller hardware
+ * Check "usb_gadget.h" for details
+ */
+static const struct usb_ep_ops usb_ep_ops = {
+ .enable = ep_enable,
+ .disable = ep_disable,
+ .alloc_request = ep_alloc_request,
+ .free_request = ep_free_request,
+ .queue = ep_queue,
+ .dequeue = ep_dequeue,
+ .set_halt = ep_set_halt,
+ .set_wedge = ep_set_wedge,
+ .fifo_flush = ep_fifo_flush,
+};
+
+/******************************************************************************
+ * GADGET block
+ *****************************************************************************/
+/**
+ * Device operations part of the API to the USB controller hardware,
+ * which don't involve endpoints (or i/o)
+ * Check "usb_gadget.h" for details
+ */
+static const struct usb_gadget_ops usb_gadget_ops;
+
+/**
+ * usb_gadget_register_driver: register a gadget driver
+ *
+ * Check usb_gadget_register_driver() at "usb_gadget.h" for details
+ * Interrupts are enabled here
+ */
+int usb_gadget_register_driver(struct usb_gadget_driver *driver)
+{
+ struct ci13xxx *udc = _udc;
+ unsigned long i, k, flags;
+ int retval = -ENOMEM;
+
+ trace("%p", driver);
+
+ if (driver == NULL ||
+ driver->bind == NULL ||
+ driver->unbind == NULL ||
+ driver->setup == NULL ||
+ driver->disconnect == NULL ||
+ driver->suspend == NULL ||
+ driver->resume == NULL)
+ return -EINVAL;
+ else if (udc == NULL)
+ return -ENODEV;
+ else if (udc->driver != NULL)
+ return -EBUSY;
+
+ /* alloc resources */
+ udc->qh_pool = dma_pool_create("ci13xxx_qh", &udc->gadget.dev,
+ sizeof(struct ci13xxx_qh),
+ 64, PAGE_SIZE);
+ if (udc->qh_pool == NULL)
+ return -ENOMEM;
+
+ udc->td_pool = dma_pool_create("ci13xxx_td", &udc->gadget.dev,
+ sizeof(struct ci13xxx_td),
+ 64, PAGE_SIZE);
+ if (udc->td_pool == NULL) {
+ dma_pool_destroy(udc->qh_pool);
+ udc->qh_pool = NULL;
+ return -ENOMEM;
+ }
+
+ spin_lock_irqsave(udc->lock, flags);
+
+ info("hw_ep_max = %d", hw_ep_max);
+
+ udc->driver = driver;
+ udc->gadget.ops = NULL;
+ udc->gadget.dev.driver = NULL;
+
+ retval = 0;
+ for (i = 0; i < hw_ep_max; i++) {
+ struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[i];
+
+ scnprintf(mEp->name, sizeof(mEp->name), "ep%i", (int)i);
+
+ mEp->lock = udc->lock;
+ mEp->device = &udc->gadget.dev;
+ mEp->td_pool = udc->td_pool;
+
+ mEp->ep.name = mEp->name;
+ mEp->ep.ops = &usb_ep_ops;
+ mEp->ep.maxpacket = CTRL_PAYLOAD_MAX;
+
+ /* this allocation cannot be random */
+ for (k = RX; k <= TX; k++) {
+ INIT_LIST_HEAD(&mEp->qh[k].queue);
+ mEp->qh[k].ptr = dma_pool_alloc(udc->qh_pool,
+ GFP_KERNEL,
+ &mEp->qh[k].dma);
+ if (mEp->qh[k].ptr == NULL)
+ retval = -ENOMEM;
+ else
+ memset(mEp->qh[k].ptr, 0,
+ sizeof(*mEp->qh[k].ptr));
+ }
+ if (i == 0)
+ udc->gadget.ep0 = &mEp->ep;
+ else
+ list_add_tail(&mEp->ep.ep_list, &udc->gadget.ep_list);
+ }
+ if (retval)
+ goto done;
+
+ /* bind gadget */
+ driver->driver.bus = NULL;
+ udc->gadget.ops = &usb_gadget_ops;
+ udc->gadget.dev.driver = &driver->driver;
+
+ spin_unlock_irqrestore(udc->lock, flags);
+ retval = driver->bind(&udc->gadget); /* MAY SLEEP */
+ spin_lock_irqsave(udc->lock, flags);
+
+ if (retval) {
+ udc->gadget.ops = NULL;
+ udc->gadget.dev.driver = NULL;
+ goto done;
+ }
+
+ retval = hw_device_state(udc->ci13xxx_ep[0].qh[RX].dma);
+
+ done:
+ spin_unlock_irqrestore(udc->lock, flags);
+ if (retval)
+ usb_gadget_unregister_driver(driver);
+ return retval;
+}
+EXPORT_SYMBOL(usb_gadget_register_driver);
+
+/**
+ * usb_gadget_unregister_driver: unregister a gadget driver
+ *
+ * Check usb_gadget_unregister_driver() at "usb_gadget.h" for details
+ */
+int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
+{
+ struct ci13xxx *udc = _udc;
+ unsigned long i, k, flags;
+
+ trace("%p", driver);
+
+ if (driver == NULL ||
+ driver->bind == NULL ||
+ driver->unbind == NULL ||
+ driver->setup == NULL ||
+ driver->disconnect == NULL ||
+ driver->suspend == NULL ||
+ driver->resume == NULL ||
+ driver != udc->driver)
+ return -EINVAL;
+
+ spin_lock_irqsave(udc->lock, flags);
+
+ hw_device_state(0);
+
+ /* unbind gadget */
+ if (udc->gadget.ops != NULL) {
+ _gadget_stop_activity(&udc->gadget);
+
+ spin_unlock_irqrestore(udc->lock, flags);
+ driver->unbind(&udc->gadget); /* MAY SLEEP */
+ spin_lock_irqsave(udc->lock, flags);
+
+ udc->gadget.ops = NULL;
+ udc->gadget.dev.driver = NULL;
+ }
+
+ /* free resources */
+ for (i = 0; i < hw_ep_max; i++) {
+ struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[i];
+
+ if (i == 0)
+ udc->gadget.ep0 = NULL;
+ else if (!list_empty(&mEp->ep.ep_list))
+ list_del_init(&mEp->ep.ep_list);
+
+ for (k = RX; k <= TX; k++)
+ if (mEp->qh[k].ptr != NULL)
+ dma_pool_free(udc->qh_pool,
+ mEp->qh[k].ptr, mEp->qh[k].dma);
+ }
+
+ udc->driver = NULL;
+
+ spin_unlock_irqrestore(udc->lock, flags);
+
+ if (udc->td_pool != NULL) {
+ dma_pool_destroy(udc->td_pool);
+ udc->td_pool = NULL;
+ }
+ if (udc->qh_pool != NULL) {
+ dma_pool_destroy(udc->qh_pool);
+ udc->qh_pool = NULL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(usb_gadget_unregister_driver);
+
+/******************************************************************************
+ * BUS block
+ *****************************************************************************/
+/**
+ * udc_irq: global interrupt handler
+ *
+ * This function returns IRQ_HANDLED if the IRQ has been handled
+ * It locks access to registers
+ */
+static irqreturn_t udc_irq(void)
+{
+ struct ci13xxx *udc = _udc;
+ irqreturn_t retval;
+ u32 intr;
+
+ trace();
+
+ if (udc == NULL) {
+ err("ENODEV");
+ return IRQ_HANDLED;
+ }
+
+ spin_lock(udc->lock);
+ intr = hw_test_and_clear_intr_active();
+ if (intr) {
+ isr_statistics.hndl.buf[isr_statistics.hndl.idx++] = intr;
+ isr_statistics.hndl.idx &= ISR_MASK;
+ isr_statistics.hndl.cnt++;
+
+ /* order defines priority - do NOT change it */
+ if (USBi_URI & intr) {
+ isr_statistics.uri++;
+ isr_reset_handler(udc);
+ }
+ if (USBi_PCI & intr) {
+ isr_statistics.pci++;
+ udc->gadget.speed = hw_port_is_high_speed() ?
+ USB_SPEED_HIGH : USB_SPEED_FULL;
+ }
+ if (USBi_UEI & intr)
+ isr_statistics.uei++;
+ if (USBi_UI & intr) {
+ isr_statistics.ui++;
+ isr_tr_complete_handler(udc);
+ }
+ if (USBi_SLI & intr)
+ isr_statistics.sli++;
+ retval = IRQ_HANDLED;
+ } else {
+ isr_statistics.none++;
+ retval = IRQ_NONE;
+ }
+ spin_unlock(udc->lock);
+
+ return retval;
+}
+
+/**
+ * udc_release: driver release function
+ * @dev: device
+ *
+ * Currently does nothing
+ */
+static void udc_release(struct device *dev)
+{
+ trace("%p", dev);
+
+ if (dev == NULL)
+ err("EINVAL");
+}
+
+/**
+ * udc_probe: parent probe must call this to initialize UDC
+ * @dev: parent device
+ * @regs: registers base address
+ * @name: driver name
+ *
+ * This function returns an error code
+ * No interrupts active, the IRQ has not been requested yet
+ * Kernel assumes 32-bit DMA operations by default, no need to dma_set_mask
+ */
+static int udc_probe(struct device *dev, void __iomem *regs, const char *name)
+{
+ struct ci13xxx *udc;
+ int retval = 0;
+
+ trace("%p, %p, %p", dev, regs, name);
+
+ if (dev == NULL || regs == NULL || name == NULL)
+ return -EINVAL;
+
+ udc = kzalloc(sizeof(struct ci13xxx), GFP_KERNEL);
+ if (udc == NULL)
+ return -ENOMEM;
+
+ udc->lock = &udc_lock;
+
+ retval = hw_device_reset(regs);
+ if (retval)
+ goto done;
+
+ udc->gadget.ops = NULL;
+ udc->gadget.speed = USB_SPEED_UNKNOWN;
+ udc->gadget.is_dualspeed = 1;
+ udc->gadget.is_otg = 0;
+ udc->gadget.name = name;
+
+ INIT_LIST_HEAD(&udc->gadget.ep_list);
+ udc->gadget.ep0 = NULL;
+
+ strcpy(udc->gadget.dev.bus_id, "gadget");
+ udc->gadget.dev.dma_mask = dev->dma_mask;
+ udc->gadget.dev.parent = dev;
+ udc->gadget.dev.release = udc_release;
+
+ retval = device_register(&udc->gadget.dev);
+ if (retval)
+ goto done;
+
+#ifdef CONFIG_USB_GADGET_DEBUG_FILES
+ retval = dbg_create_files(&udc->gadget.dev);
+#endif
+ if (retval) {
+ device_unregister(&udc->gadget.dev);
+ goto done;
+ }
+
+ _udc = udc;
+ return retval;
+
+ done:
+ err("error = %i", retval);
+ kfree(udc);
+ _udc = NULL;
+ return retval;
+}
+
+/**
+ * udc_remove: parent remove must call this to remove UDC
+ *
+ * No interrupts active, the IRQ has been released
+ */
+static void udc_remove(void)
+{
+ struct ci13xxx *udc = _udc;
+
+ if (udc == NULL) {
+ err("EINVAL");
+ return;
+ }
+
+#ifdef CONFIG_USB_GADGET_DEBUG_FILES
+ dbg_remove_files(&udc->gadget.dev);
+#endif
+ device_unregister(&udc->gadget.dev);
+
+ kfree(udc);
+ _udc = NULL;
+}
+
+/******************************************************************************
+ * PCI block
+ *****************************************************************************/
+/**
+ * ci13xxx_pci_irq: interrupt handler
+ * @irq: irq number
+ * @pdev: USB Device Controller interrupt source
+ *
+ * This function returns IRQ_HANDLED if the IRQ has been handled
+ * This is an ISR; don't trace here, use the attribute interface instead
+ */
+static irqreturn_t ci13xxx_pci_irq(int irq, void *pdev)
+{
+ if (irq == 0) {
+ dev_err(&((struct pci_dev *)pdev)->dev, "Invalid IRQ0 usage!");
+ return IRQ_HANDLED;
+ }
+ return udc_irq();
+}
+
+/**
+ * ci13xxx_pci_probe: PCI probe
+ * @pdev: USB device controller being probed
+ * @id: PCI hotplug ID connecting controller to UDC framework
+ *
+ * This function returns an error code
+ * Allocates basic PCI resources for this USB device controller, and then
+ * invokes the udc_probe() method to start the UDC associated with it
+ */
+static int __devinit ci13xxx_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ void __iomem *regs = NULL;
+ int retval = 0;
+
+ if (id == NULL)
+ return -EINVAL;
+
+ retval = pci_enable_device(pdev);
+ if (retval)
+ goto done;
+
+ if (!pdev->irq) {
+ dev_err(&pdev->dev, "No IRQ, check BIOS/PCI setup!");
+ retval = -ENODEV;
+ goto disable_device;
+ }
+
+ retval = pci_request_regions(pdev, UDC_DRIVER_NAME);
+ if (retval)
+ goto disable_device;
+
+ /* BAR 0 holds all the registers */
+ regs = pci_iomap(pdev, 0, 0);
+ if (!regs) {
+ dev_err(&pdev->dev, "Error mapping memory!");
+ retval = -EFAULT;
+ goto release_regions;
+ }
+ pci_set_drvdata(pdev, (__force void *)regs);
+
+ pci_set_master(pdev);
+ pci_try_set_mwi(pdev);
+
+ retval = udc_probe(&pdev->dev, regs, UDC_DRIVER_NAME);
+ if (retval)
+ goto iounmap;
+
+ /* our device does not have MSI capability */
+
+ retval = request_irq(pdev->irq, ci13xxx_pci_irq, IRQF_SHARED,
+ UDC_DRIVER_NAME, pdev);
+ if (retval)
+ goto gadget_remove;
+
+ return 0;
+
+ gadget_remove:
+ udc_remove();
+ iounmap:
+ pci_iounmap(pdev, regs);
+ release_regions:
+ pci_release_regions(pdev);
+ disable_device:
+ pci_disable_device(pdev);
+ done:
+ return retval;
+}
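
As a hypothetical alternative (not part of this patch), the managed PCI
helpers already available in this kernel generation would collapse most of the
unwind ladder above by tying resource lifetime to the pci_dev; a sketch:

	static int __devinit ci13xxx_pci_probe_managed(struct pci_dev *pdev,
					const struct pci_device_id *id)
	{
		void __iomem *regs;
		int retval;

		retval = pcim_enable_device(pdev);
		if (retval)
			return retval;
		if (!pdev->irq)
			return -ENODEV;

		/* BAR 0 holds all the registers */
		retval = pcim_iomap_regions(pdev, 1 << 0, UDC_DRIVER_NAME);
		if (retval)
			return retval;
		regs = pcim_iomap_table(pdev)[0];

		pci_set_master(pdev);
		pci_try_set_mwi(pdev);

		retval = udc_probe(&pdev->dev, regs, UDC_DRIVER_NAME);
		if (retval)
			return retval;

		retval = devm_request_irq(&pdev->dev, pdev->irq, ci13xxx_pci_irq,
					  IRQF_SHARED, UDC_DRIVER_NAME, pdev);
		if (retval)
			udc_remove();
		return retval;
	}
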
+
+/**
+ * ci13xxx_pci_remove: PCI remove
+ * @pdev: USB Device Controller being removed
+ *
+ * Reverses the effect of ci13xxx_pci_probe(),
+ * first invoking udc_remove() and then releasing
+ * all PCI resources allocated for this USB device controller
+ */
+static void __devexit ci13xxx_pci_remove(struct pci_dev *pdev)
+{
+ free_irq(pdev->irq, pdev);
+ udc_remove();
+ pci_iounmap(pdev, (__force void __iomem *)pci_get_drvdata(pdev));
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+}
+
+/**
+ * PCI device table
+ * PCI device structure
+ *
+ * Check "pci.h" for details
+ */
+static DEFINE_PCI_DEVICE_TABLE(ci13xxx_pci_id_table) = {
+ { PCI_DEVICE(0x153F, 0x1004) },
+ { PCI_DEVICE(0x153F, 0x1006) },
+ { 0, 0, 0, 0, 0, 0, 0 /* end: all zeroes */ }
+};
+MODULE_DEVICE_TABLE(pci, ci13xxx_pci_id_table);
+
+static struct pci_driver ci13xxx_pci_driver = {
+ .name = UDC_DRIVER_NAME,
+ .id_table = ci13xxx_pci_id_table,
+ .probe = ci13xxx_pci_probe,
+ .remove = __devexit_p(ci13xxx_pci_remove),
+};
+
+/**
+ * ci13xxx_pci_init: module init
+ *
+ * Driver load
+ */
+static int __init ci13xxx_pci_init(void)
+{
+ return pci_register_driver(&ci13xxx_pci_driver);
+}
+module_init(ci13xxx_pci_init);
+
+/**
+ * ci13xxx_pci_exit: module exit
+ *
+ * Driver unload
+ */
+static void __exit ci13xxx_pci_exit(void)
+{
+ pci_unregister_driver(&ci13xxx_pci_driver);
+}
+module_exit(ci13xxx_pci_exit);
+
+MODULE_AUTHOR("MIPS - David Lopo ");
+MODULE_DESCRIPTION("MIPS CI13XXX USB Peripheral Controller");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("June 2008");
diff --git a/drivers/usb/gadget/ci13xxx_udc.h b/drivers/usb/gadget/ci13xxx_udc.h
new file mode 100644
index 00000000000..4026e9cede3
--- /dev/null
+++ b/drivers/usb/gadget/ci13xxx_udc.h
@@ -0,0 +1,195 @@
+/*
+ * ci13xxx_udc.h - structures, registers, and macros MIPS USB IP core
+ *
+ * Copyright (C) 2008 Chipidea - MIPS Technologies, Inc. All rights reserved.
+ *
+ * Author: David Lopo
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Description: MIPS USB IP core family device controller
+ * Structures, registers and logging macros
+ */
+
+#ifndef _CI13XXX_h_
+#define _CI13XXX_h_
+
+/******************************************************************************
+ * DEFINE
+ *****************************************************************************/
+#define ENDPT_MAX (16)
+#define CTRL_PAYLOAD_MAX (64)
+#define RX (0) /* similar to USB_DIR_OUT but can be used as an index */
+#define TX (1) /* similar to USB_DIR_IN but can be used as an index */
+
+/******************************************************************************
+ * STRUCTURES
+ *****************************************************************************/
+/* DMA layout of transfer descriptors */
+struct ci13xxx_td {
+ /* 0 */
+ u32 next;
+#define TD_TERMINATE BIT(0)
+ /* 1 */
+ u32 token;
+#define TD_STATUS (0x00FFUL << 0)
+#define TD_STATUS_TR_ERR BIT(3)
+#define TD_STATUS_DT_ERR BIT(5)
+#define TD_STATUS_HALTED BIT(6)
+#define TD_STATUS_ACTIVE BIT(7)
+#define TD_MULTO (0x0003UL << 10)
+#define TD_IOC BIT(15)
+#define TD_TOTAL_BYTES (0x7FFFUL << 16)
+ /* 2 */
+ u32 page[5];
+#define TD_CURR_OFFSET (0x0FFFUL << 0)
+#define TD_FRAME_NUM (0x07FFUL << 0)
+#define TD_RESERVED_MASK (0x0FFFUL << 0)
+} __attribute__ ((packed));
+
+/* DMA layout of queue heads */
+struct ci13xxx_qh {
+ /* 0 */
+ u32 cap;
+#define QH_IOS BIT(15)
+#define QH_MAX_PKT (0x07FFUL << 16)
+#define QH_ZLT BIT(29)
+#define QH_MULT (0x0003UL << 30)
+ /* 1 */
+ u32 curr;
+ /* 2 - 8 */
+ struct ci13xxx_td td;
+ /* 9 */
+ u32 RESERVED;
+ struct usb_ctrlrequest setup;
+} __attribute__ ((packed));
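
The dword-offset comments in the two structures above spell out a fixed DMA
layout: seven 32-bit words per transfer descriptor and twelve per queue head.
A hypothetical compile-time check (not part of the patch) could guard against
accidental padding:

	static inline void ci13xxx_dma_layout_check(void)
	{
		BUILD_BUG_ON(sizeof(struct ci13xxx_td) != 7 * sizeof(u32));
		BUILD_BUG_ON(sizeof(struct ci13xxx_qh) != 12 * sizeof(u32));
	}
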
+
+/* Extension of usb_request */
+struct ci13xxx_req {
+ struct usb_request req;
+ unsigned map;
+ struct list_head queue;
+ struct ci13xxx_td *ptr;
+ dma_addr_t dma;
+};
+
+/* Extension of usb_ep */
+struct ci13xxx_ep {
+ struct usb_ep ep;
+ const struct usb_endpoint_descriptor *desc;
+ u8 dir;
+ u8 num;
+ u8 type;
+ char name[16];
+ struct {
+ struct list_head queue;
+ struct ci13xxx_qh *ptr;
+ dma_addr_t dma;
+ } qh[2];
+ struct usb_request *status;
+ int wedge;
+
+ /* global resources */
+ spinlock_t *lock;
+ struct device *device;
+ struct dma_pool *td_pool;
+};
+
+/* CI13XXX UDC descriptor & global resources */
+struct ci13xxx {
+ spinlock_t *lock; /* ctrl register bank access */
+
+ struct dma_pool *qh_pool; /* DMA pool for queue heads */
+ struct dma_pool *td_pool; /* DMA pool for transfer descs */
+
+ struct usb_gadget gadget; /* USB slave device */
+ struct ci13xxx_ep ci13xxx_ep[ENDPT_MAX]; /* extended endpts */
+
+ struct usb_gadget_driver *driver; /* 3rd party gadget driver */
+};
+
+/******************************************************************************
+ * REGISTERS
+ *****************************************************************************/
+/* register size */
+#define REG_BITS (32)
+
+/* HCCPARAMS */
+#define HCCPARAMS_LEN BIT(17)
+
+/* DCCPARAMS */
+#define DCCPARAMS_DEN (0x1F << 0)
+#define DCCPARAMS_DC BIT(7)
+
+/* TESTMODE */
+#define TESTMODE_FORCE BIT(0)
+
+/* USBCMD */
+#define USBCMD_RS BIT(0)
+#define USBCMD_RST BIT(1)
+#define USBCMD_SUTW BIT(13)
+
+/* USBSTS & USBINTR */
+#define USBi_UI BIT(0)
+#define USBi_UEI BIT(1)
+#define USBi_PCI BIT(2)
+#define USBi_URI BIT(6)
+#define USBi_SLI BIT(8)
+
+/* DEVICEADDR */
+#define DEVICEADDR_USBADRA BIT(24)
+#define DEVICEADDR_USBADR (0x7FUL << 25)
+
+/* PORTSC */
+#define PORTSC_SUSP BIT(7)
+#define PORTSC_HSP BIT(9)
+#define PORTSC_PTC (0x0FUL << 16)
+
+/* DEVLC */
+#define DEVLC_PSPD (0x03UL << 25)
+#define DEVLC_PSPD_HS (0x02UL << 25)
+
+/* USBMODE */
+#define USBMODE_CM (0x03UL << 0)
+#define USBMODE_CM_IDLE (0x00UL << 0)
+#define USBMODE_CM_DEVICE (0x02UL << 0)
+#define USBMODE_CM_HOST (0x03UL << 0)
+#define USBMODE_SLOM BIT(3)
+
+/* ENDPTCTRL */
+#define ENDPTCTRL_RXS BIT(0)
+#define ENDPTCTRL_RXT (0x03UL << 2)
+#define ENDPTCTRL_RXR BIT(6) /* reserved for port 0 */
+#define ENDPTCTRL_RXE BIT(7)
+#define ENDPTCTRL_TXS BIT(16)
+#define ENDPTCTRL_TXT (0x03UL << 18)
+#define ENDPTCTRL_TXR BIT(22) /* reserved for port 0 */
+#define ENDPTCTRL_TXE BIT(23)
+
+/******************************************************************************
+ * LOGGING
+ *****************************************************************************/
+#define ci13xxx_printk(level, format, args...) \
+do { \
+ if (_udc == NULL) \
+ printk(level "[%s] " format "\n", __func__, ## args); \
+ else \
+ dev_printk(level, _udc->gadget.dev.parent, \
+ "[%s] " format "\n", __func__, ## args); \
+} while (0)
+
+#define err(format, args...) ci13xxx_printk(KERN_ERR, format, ## args)
+#define warn(format, args...) ci13xxx_printk(KERN_WARNING, format, ## args)
+#define info(format, args...) ci13xxx_printk(KERN_INFO, format, ## args)
+
+#ifdef TRACE
+#define trace(format, args...) ci13xxx_printk(KERN_DEBUG, format, ## args)
+#define dbg_trace(format, args...) dev_dbg(dev, format, ##args)
+#else
+#define trace(format, args...) do {} while (0)
+#define dbg_trace(format, args...) do {} while (0)
+#endif
+
+#endif /* _CI13XXX_h_ */
diff --git a/drivers/usb/gadget/epautoconf.c b/drivers/usb/gadget/epautoconf.c
index 9462e30192d..a36b1175b18 100644
--- a/drivers/usb/gadget/epautoconf.c
+++ b/drivers/usb/gadget/epautoconf.c
@@ -161,7 +161,7 @@ ep_matches (
/* report address */
desc->bEndpointAddress &= USB_DIR_IN;
if (isdigit (ep->name [2])) {
- u8 num = simple_strtol (&ep->name [2], NULL, 10);
+ u8 num = simple_strtoul (&ep->name [2], NULL, 10);
desc->bEndpointAddress |= num;
#ifdef MANY_ENDPOINTS
} else if (desc->bEndpointAddress & USB_DIR_IN) {
diff --git a/drivers/usb/gadget/file_storage.c b/drivers/usb/gadget/file_storage.c
index 2e71368f45b..b10fa31cc91 100644
--- a/drivers/usb/gadget/file_storage.c
+++ b/drivers/usb/gadget/file_storage.c
@@ -1,7 +1,7 @@
/*
* file_storage.c -- File-backed USB Storage Gadget, for USB development
*
- * Copyright (C) 2003-2007 Alan Stern
+ * Copyright (C) 2003-2008 Alan Stern
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -38,16 +38,17 @@
/*
* The File-backed Storage Gadget acts as a USB Mass Storage device,
- * appearing to the host as a disk drive. In addition to providing an
- * example of a genuinely useful gadget driver for a USB device, it also
- * illustrates a technique of double-buffering for increased throughput.
- * Last but not least, it gives an easy way to probe the behavior of the
- * Mass Storage drivers in a USB host.
+ * appearing to the host as a disk drive or as a CD-ROM drive. In addition
+ * to providing an example of a genuinely useful gadget driver for a USB
+ * device, it also illustrates a technique of double-buffering for increased
+ * throughput. Last but not least, it gives an easy way to probe the
+ * behavior of the Mass Storage drivers in a USB host.
*
* Backing storage is provided by a regular file or a block device, specified
* by the "file" module parameter. Access can be limited to read-only by
- * setting the optional "ro" module parameter. The gadget will indicate that
- * it has removable media if the optional "removable" module parameter is set.
+ * setting the optional "ro" module parameter. (For CD-ROM emulation,
+ * access is always read-only.) The gadget will indicate that it has
+ * removable media if the optional "removable" module parameter is set.
*
* The gadget supports the Control-Bulk (CB), Control-Bulk-Interrupt (CBI),
* and Bulk-Only (also known as Bulk-Bulk-Bulk or BBB) transports, selected
@@ -64,7 +65,12 @@
* The default number of LUNs is taken from the number of "file" elements;
* it is 1 if "file" is not given. If "removable" is not set then a backing
* file must be specified for each LUN. If it is set, then an unspecified
- * or empty backing filename means the LUN's medium is not loaded.
+ * or empty backing filename means the LUN's medium is not loaded. Ideally
+ * each LUN would be settable independently as a disk drive or a CD-ROM
+ * drive, but currently all LUNs have to be the same type. The CD-ROM
+ * emulation includes a single data track and no audio tracks; hence there
+ * need be only one backing file per LUN. Note also that the CD-ROM block
+ * length is set to 512 rather than the more common value 2048.
*
* Requirements are modest; only a bulk-in and a bulk-out endpoint are
* needed (an interrupt-out endpoint is also needed for CBI). The memory
@@ -91,6 +97,8 @@
* USB device controller (usually true),
* boolean to permit the driver to halt
* bulk endpoints
+ * cdrom Default false, boolean for whether to emulate
+ * a CD-ROM drive
* transport=XXX Default BBB, transport name (CB, CBI, or BBB)
* protocol=YYY Default SCSI, protocol name (RBC, 8020 or
* ATAPI, QIC, UFI, 8070, or SCSI;
@@ -103,15 +111,16 @@
* PAGE_CACHE_SIZE)
*
* If CONFIG_USB_FILE_STORAGE_TEST is not set, only the "file", "ro",
- * "removable", "luns", and "stall" options are available; default values
- * are used for everything else.
+ * "removable", "luns", "stall", and "cdrom" options are available; default
+ * values are used for everything else.
*
* The pathnames of the backing files and the ro settings are available in
* the attribute files "file" and "ro" in the lun subdirectory of the
* gadget's sysfs directory. If the "removable" option is set, writing to
* these files will simulate ejecting/loading the medium (writing an empty
* line means eject) and adjusting a write-enable tab. Changes to the ro
- * setting are not allowed when the medium is loaded.
+ * setting are not allowed when the medium is loaded or if CD-ROM emulation
+ * is being used.
*
* This gadget driver is heavily based on "Gadget Zero" by David Brownell.
* The driver's SCSI command interface was based on the "Information
@@ -261,7 +270,7 @@
#define DRIVER_DESC "File-backed Storage Gadget"
#define DRIVER_NAME "g_file_storage"
-#define DRIVER_VERSION "7 August 2007"
+#define DRIVER_VERSION "20 November 2008"
static const char longname[] = DRIVER_DESC;
static const char shortname[] = DRIVER_NAME;
@@ -341,6 +350,7 @@ static struct {
int removable;
int can_stall;
+ int cdrom;
char *transport_parm;
char *protocol_parm;
@@ -359,6 +369,7 @@ static struct {
.protocol_parm = "SCSI",
.removable = 0,
.can_stall = 1,
+ .cdrom = 0,
.vendor = DRIVER_VENDOR_ID,
.product = DRIVER_PRODUCT_ID,
.release = 0xffff, // Use controller chip type
@@ -382,6 +393,9 @@ MODULE_PARM_DESC(removable, "true to simulate removable media");
module_param_named(stall, mod_data.can_stall, bool, S_IRUGO);
MODULE_PARM_DESC(stall, "false to prevent bulk stalls");
+module_param_named(cdrom, mod_data.cdrom, bool, S_IRUGO);
+MODULE_PARM_DESC(cdrom, "true to emulate cdrom instead of disk");
+
/* In the non-TEST version, only the module parameters listed above
* are available. */
@@ -411,6 +425,10 @@ MODULE_PARM_DESC(buflen, "I/O buffer size");
/*-------------------------------------------------------------------------*/
+/* SCSI device types */
+#define TYPE_DISK 0x00
+#define TYPE_CDROM 0x05
+
/* USB protocol value = the transport method */
#define USB_PR_CBI 0x00 // Control/Bulk/Interrupt
#define USB_PR_CB 0x01 // Control/Bulk w/o interrupt
@@ -487,6 +505,8 @@ struct interrupt_data {
#define SC_READ_12 0xa8
#define SC_READ_CAPACITY 0x25
#define SC_READ_FORMAT_CAPACITIES 0x23
+#define SC_READ_HEADER 0x44
+#define SC_READ_TOC 0x43
#define SC_RELEASE 0x17
#define SC_REQUEST_SENSE 0x03
#define SC_RESERVE 0x16
@@ -2006,23 +2026,28 @@ static int do_inquiry(struct fsg_dev *fsg, struct fsg_buffhd *bh)
u8 *buf = (u8 *) bh->buf;
static char vendor_id[] = "Linux ";
- static char product_id[] = "File-Stor Gadget";
+ static char product_disk_id[] = "File-Stor Gadget";
+ static char product_cdrom_id[] = "File-CD Gadget ";
if (!fsg->curlun) { // Unsupported LUNs are okay
fsg->bad_lun_okay = 1;
memset(buf, 0, 36);
buf[0] = 0x7f; // Unsupported, no device-type
+ buf[4] = 31; // Additional length
return 36;
}
- memset(buf, 0, 8); // Non-removable, direct-access device
+ memset(buf, 0, 8);
+ buf[0] = (mod_data.cdrom ? TYPE_CDROM : TYPE_DISK);
if (mod_data.removable)
buf[1] = 0x80;
buf[2] = 2; // ANSI SCSI level 2
buf[3] = 2; // SCSI-2 INQUIRY data format
buf[4] = 31; // Additional length
// No special options
- sprintf(buf + 8, "%-8s%-16s%04x", vendor_id, product_id,
+ sprintf(buf + 8, "%-8s%-16s%04x", vendor_id,
+ (mod_data.cdrom ? product_cdrom_id :
+ product_disk_id),
mod_data.release);
return 36;
}
@@ -2101,6 +2126,75 @@ static int do_read_capacity(struct fsg_dev *fsg, struct fsg_buffhd *bh)
}
+static void store_cdrom_address(u8 *dest, int msf, u32 addr)
+{
+ if (msf) {
+ /* Convert to Minutes-Seconds-Frames */
+ addr >>= 2; /* Convert to 2048-byte frames */
+ addr += 2*75; /* Lead-in occupies 2 seconds */
+ dest[3] = addr % 75; /* Frames */
+ addr /= 75;
+ dest[2] = addr % 60; /* Seconds */
+ addr /= 60;
+ dest[1] = addr; /* Minutes */
+ dest[0] = 0; /* Reserved */
+ } else {
+ /* Absolute sector */
+ put_be32(dest, addr);
+ }
+}
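
A quick worked example of the conversion above (illustrative, not part of the
patch): one minute of audio-time addressing is 60 * 75 = 4500 CD frames, i.e.
18000 of the 512-byte blocks this gadget reports, and the two-second lead-in
shifts that to MSF 01:02:00:

	static void example_msf(void)
	{
		u8 msf[4];

		store_cdrom_address(msf, 1, 18000);	/* 18000 >> 2 = 4500 frames */
		/* msf[] == { 0, 1, 2, 0 }: reserved, 1 min, 2 s, 0 frames */
	}
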
+
+static int do_read_header(struct fsg_dev *fsg, struct fsg_buffhd *bh)
+{
+ struct lun *curlun = fsg->curlun;
+ int msf = fsg->cmnd[1] & 0x02;
+ u32 lba = get_be32(&fsg->cmnd[2]);
+ u8 *buf = (u8 *) bh->buf;
+
+ if ((fsg->cmnd[1] & ~0x02) != 0) { /* Mask away MSF */
+ curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
+ return -EINVAL;
+ }
+ if (lba >= curlun->num_sectors) {
+ curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
+ return -EINVAL;
+ }
+
+ memset(buf, 0, 8);
+ buf[0] = 0x01; /* 2048 bytes of user data, rest is EC */
+ store_cdrom_address(&buf[4], msf, lba);
+ return 8;
+}
+
+
+static int do_read_toc(struct fsg_dev *fsg, struct fsg_buffhd *bh)
+{
+ struct lun *curlun = fsg->curlun;
+ int msf = fsg->cmnd[1] & 0x02;
+ int start_track = fsg->cmnd[6];
+ u8 *buf = (u8 *) bh->buf;
+
+ if ((fsg->cmnd[1] & ~0x02) != 0 || /* Mask away MSF */
+ start_track > 1) {
+ curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
+ return -EINVAL;
+ }
+
+ memset(buf, 0, 20);
+ buf[1] = (20-2); /* TOC data length */
+ buf[2] = 1; /* First track number */
+ buf[3] = 1; /* Last track number */
+ buf[5] = 0x16; /* Data track, copying allowed */
+ buf[6] = 0x01; /* Only track is number 1 */
+ store_cdrom_address(&buf[8], msf, 0);
+
+ buf[13] = 0x16; /* Lead-out track is data */
+ buf[14] = 0xAA; /* Lead-out track number */
+ store_cdrom_address(&buf[16], msf, curlun->num_sectors);
+ return 20;
+}
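
The 20-byte response assembled above matches the standard READ TOC data
format; a hypothetical struct view of it (illustrative only) makes the offsets
used by the code easier to follow:

	struct toc_response {
		u8 data_len[2];		/* big-endian; 18 bytes follow */
		u8 first_track;		/* 1 */
		u8 last_track;		/* 1 */
		struct {
			u8 reserved;
			u8 adr_ctl;	/* 0x16: data track, copying allowed */
			u8 track;	/* 1 for the data track, 0xAA for lead-out */
			u8 reserved2;
			u8 addr[4];	/* MSF or absolute address */
		} __attribute__ ((packed)) desc[2];
	} __attribute__ ((packed));
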
+
+
static int do_mode_sense(struct fsg_dev *fsg, struct fsg_buffhd *bh)
{
struct lun *curlun = fsg->curlun;
@@ -2848,6 +2942,26 @@ static int do_scsi_command(struct fsg_dev *fsg)
reply = do_read_capacity(fsg, bh);
break;
+ case SC_READ_HEADER:
+ if (!mod_data.cdrom)
+ goto unknown_cmnd;
+ fsg->data_size_from_cmnd = get_be16(&fsg->cmnd[7]);
+ if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
+ (3<<7) | (0x1f<<1), 1,
+ "READ HEADER")) == 0)
+ reply = do_read_header(fsg, bh);
+ break;
+
+ case SC_READ_TOC:
+ if (!mod_data.cdrom)
+ goto unknown_cmnd;
+ fsg->data_size_from_cmnd = get_be16(&fsg->cmnd[7]);
+ if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
+ (7<<6) | (1<<1), 1,
+ "READ TOC")) == 0)
+ reply = do_read_toc(fsg, bh);
+ break;
+
case SC_READ_FORMAT_CAPACITIES:
fsg->data_size_from_cmnd = get_be16(&fsg->cmnd[7]);
if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
@@ -2933,6 +3047,7 @@ static int do_scsi_command(struct fsg_dev *fsg)
// Fall through
default:
+ unknown_cmnd:
fsg->data_size_from_cmnd = 0;
sprintf(unknown, "Unknown x%02x", fsg->cmnd[0]);
if ((reply = check_command(fsg, fsg->cmnd_size,
@@ -3498,6 +3613,7 @@ static int open_backing_file(struct lun *curlun, const char *filename)
struct inode *inode = NULL;
loff_t size;
loff_t num_sectors;
+ loff_t min_sectors;
/* R/W if we can, R/O if we must */
ro = curlun->ro;
@@ -3541,8 +3657,19 @@ static int open_backing_file(struct lun *curlun, const char *filename)
rc = (int) size;
goto out;
}
- num_sectors = size >> 9; // File size in 512-byte sectors
- if (num_sectors == 0) {
+ num_sectors = size >> 9; // File size in 512-byte blocks
+ min_sectors = 1;
+ if (mod_data.cdrom) {
+ num_sectors &= ~3; // Reduce to a multiple of 2048
+ min_sectors = 300*4; // Smallest track is 300 frames
+ if (num_sectors >= 256*60*75*4) {
+ num_sectors = (256*60*75 - 1) * 4;
+ LINFO(curlun, "file too big: %s\n", filename);
+ LINFO(curlun, "using only first %d blocks\n",
+ (int) num_sectors);
+ }
+ }
+ if (num_sectors < min_sectors) {
LINFO(curlun, "file too small: %s\n", filename);
rc = -ETOOSMALL;
goto out;
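
To put the limits above in numbers: the smallest usable backing file is
300 frames * 2048 bytes = 600 KiB, and the largest addressable image is
capped at (256*60*75 - 1) = 1,151,999 frames, roughly 2.2 GiB; a bigger file
is still accepted, but only its first 4 * 1,151,999 512-byte blocks are
exposed to the host.
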
@@ -3845,9 +3972,12 @@ static int __init fsg_bind(struct usb_gadget *gadget)
goto out;
if (mod_data.removable) { // Enable the store_xxx attributes
- dev_attr_ro.attr.mode = dev_attr_file.attr.mode = 0644;
- dev_attr_ro.store = store_ro;
+ dev_attr_file.attr.mode = 0644;
dev_attr_file.store = store_file;
+ if (!mod_data.cdrom) {
+ dev_attr_ro.attr.mode = 0644;
+ dev_attr_ro.store = store_ro;
+ }
}
/* Find out how many LUNs there should be */
@@ -3872,6 +4002,8 @@ static int __init fsg_bind(struct usb_gadget *gadget)
for (i = 0; i < fsg->nluns; ++i) {
curlun = &fsg->luns[i];
curlun->ro = mod_data.ro[i];
+ if (mod_data.cdrom)
+ curlun->ro = 1;
curlun->dev.release = lun_release;
curlun->dev.parent = &gadget->dev;
curlun->dev.driver = &fsg_driver.driver;
@@ -4031,9 +4163,9 @@ static int __init fsg_bind(struct usb_gadget *gadget)
mod_data.protocol_name, mod_data.protocol_type);
DBG(fsg, "VendorID=x%04x, ProductID=x%04x, Release=x%04x\n",
mod_data.vendor, mod_data.product, mod_data.release);
- DBG(fsg, "removable=%d, stall=%d, buflen=%u\n",
+ DBG(fsg, "removable=%d, stall=%d, cdrom=%d, buflen=%u\n",
mod_data.removable, mod_data.can_stall,
- mod_data.buflen);
+ mod_data.cdrom, mod_data.buflen);
DBG(fsg, "I/O thread pid: %d\n", task_pid_nr(fsg->thread_task));
set_bit(REGISTERED, &fsg->atomic_bitflags);
@@ -4050,6 +4182,7 @@ out:
fsg->state = FSG_STATE_TERMINATED; // The thread is dead
fsg_unbind(gadget);
close_all_backing_files(fsg);
+ complete(&fsg->thread_notifier);
return rc;
}
diff --git a/drivers/usb/gadget/fsl_qe_udc.c b/drivers/usb/gadget/fsl_qe_udc.c
index f4027256509..d6c5bcd4006 100644
--- a/drivers/usb/gadget/fsl_qe_udc.c
+++ b/drivers/usb/gadget/fsl_qe_udc.c
@@ -26,6 +26,7 @@
#include
#include
#include
+#include
#include
#include
#include
@@ -370,6 +371,9 @@ static int qe_ep_bd_init(struct qe_udc *udc, unsigned char pipe_num)
/* alloc multi-ram for BD rings and set the ep parameters */
tmp_addr = cpm_muram_alloc(sizeof(struct qe_bd) * (bdring_len +
USB_BDRING_LEN_TX), QE_ALIGNMENT_OF_BD);
+ if (IS_ERR_VALUE(tmp_addr))
+ return -ENOMEM;
+
out_be16(&epparam->rbase, (u16)tmp_addr);
out_be16(&epparam->tbase, (u16)(tmp_addr +
(sizeof(struct qe_bd) * bdring_len)));
@@ -689,7 +693,7 @@ en_done2:
en_done1:
spin_unlock_irqrestore(&udc->lock, flags);
en_done:
- dev_dbg(udc->dev, "failed to initialize %s\n", ep->ep.name);
+ dev_err(udc->dev, "failed to initialize %s\n", ep->ep.name);
return -ENODEV;
}
@@ -2408,6 +2412,8 @@ static struct qe_udc __devinit *qe_udc_config(struct of_device *ofdev)
tmp_addr = cpm_muram_alloc((USB_MAX_ENDPOINTS *
sizeof(struct usb_ep_para)),
USB_EP_PARA_ALIGNMENT);
+ if (IS_ERR_VALUE(tmp_addr))
+ goto cleanup;
for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
out_be16(&usbpram->epptr[i], (u16)tmp_addr);
@@ -2513,7 +2519,7 @@ static int __devinit qe_udc_probe(struct of_device *ofdev,
/* Initialize the udc structure including QH member and other member */
udc_controller = qe_udc_config(ofdev);
if (!udc_controller) {
- dev_dbg(&ofdev->dev, "udc_controll is NULL\n");
+ dev_err(&ofdev->dev, "failed to initialize\n");
return -ENOMEM;
}
@@ -2568,7 +2574,7 @@ static int __devinit qe_udc_probe(struct of_device *ofdev,
/* create a buf for ZLP send, need to remain zeroed */
udc_controller->nullbuf = kzalloc(256, GFP_KERNEL);
if (udc_controller->nullbuf == NULL) {
- dev_dbg(udc_controller->dev, "cannot alloc nullbuf\n");
+ dev_err(udc_controller->dev, "cannot alloc nullbuf\n");
ret = -ENOMEM;
goto err3;
}
diff --git a/drivers/usb/gadget/gadget_chips.h b/drivers/usb/gadget/gadget_chips.h
index 4e3107dd2f3..ec6d439a2aa 100644
--- a/drivers/usb/gadget/gadget_chips.h
+++ b/drivers/usb/gadget/gadget_chips.h
@@ -110,7 +110,6 @@
#define gadget_is_at91(g) 0
#endif
-/* status unclear */
#ifdef CONFIG_USB_GADGET_IMX
#define gadget_is_imx(g) !strcmp("imx_udc", (g)->name)
#else
@@ -158,6 +157,11 @@
#define gadget_is_fsl_qe(g) 0
#endif
+#ifdef CONFIG_USB_GADGET_CI13XXX
+#define gadget_is_ci13xxx(g) (!strcmp("ci13xxx_udc", (g)->name))
+#else
+#define gadget_is_ci13xxx(g) 0
+#endif
// CONFIG_USB_GADGET_SX2
// CONFIG_USB_GADGET_AU1X00
@@ -225,6 +229,8 @@ static inline int usb_gadget_controller_number(struct usb_gadget *gadget)
return 0x21;
else if (gadget_is_fsl_qe(gadget))
return 0x22;
+ else if (gadget_is_ci13xxx(gadget))
+ return 0x23;
return -ENOENT;
}
diff --git a/drivers/usb/gadget/goku_udc.c b/drivers/usb/gadget/goku_udc.c
index 60aa04847b1..63419c4d503 100644
--- a/drivers/usb/gadget/goku_udc.c
+++ b/drivers/usb/gadget/goku_udc.c
@@ -1349,7 +1349,7 @@ int usb_gadget_register_driver(struct usb_gadget_driver *driver)
int retval;
if (!driver
- || driver->speed != USB_SPEED_FULL
+ || driver->speed < USB_SPEED_FULL
|| !driver->bind
|| !driver->disconnect
|| !driver->setup)
diff --git a/drivers/usb/gadget/imx_udc.c b/drivers/usb/gadget/imx_udc.c
new file mode 100644
index 00000000000..cde8fdf15d5
--- /dev/null
+++ b/drivers/usb/gadget/imx_udc.c
@@ -0,0 +1,1516 @@
+/*
+ * drivers/usb/gadget/imx_udc.c
+ *
+ * Copyright (C) 2005 Mike Lee(eemike@gmail.com)
+ * Copyright (C) 2008 Darius Augulis
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+
+#include
+#include
+
+#include "imx_udc.h"
+
+static const char driver_name[] = "imx_udc";
+static const char ep0name[] = "ep0";
+
+void ep0_chg_stat(const char *label, struct imx_udc_struct *imx_usb,
+ enum ep0_state stat);
+
+/*******************************************************************************
+ * IMX UDC hardware related functions
+ *******************************************************************************
+ */
+
+void imx_udc_enable(struct imx_udc_struct *imx_usb)
+{
+ int temp = __raw_readl(imx_usb->base + USB_CTRL);
+ __raw_writel(temp | CTRL_FE_ENA | CTRL_AFE_ENA, imx_usb->base + USB_CTRL);
+ imx_usb->gadget.speed = USB_SPEED_FULL;
+}
+
+void imx_udc_disable(struct imx_udc_struct *imx_usb)
+{
+ int temp = __raw_readl(imx_usb->base + USB_CTRL);
+
+ __raw_writel(temp & ~(CTRL_FE_ENA | CTRL_AFE_ENA),
+ imx_usb->base + USB_CTRL);
+
+ ep0_chg_stat(__func__, imx_usb, EP0_IDLE);
+ imx_usb->gadget.speed = USB_SPEED_UNKNOWN;
+}
+
+void imx_udc_reset(struct imx_udc_struct *imx_usb)
+{
+ int temp = __raw_readl(imx_usb->base + USB_ENAB);
+
+ /* set RST bit */
+ __raw_writel(temp | ENAB_RST, imx_usb->base + USB_ENAB);
+
+ /* wait for the RST bit to clear */
+ do {} while (__raw_readl(imx_usb->base + USB_ENAB) & ENAB_RST);
+
+ /* wait for the CFG bit to assert */
+ do {} while (!(__raw_readl(imx_usb->base + USB_DADR) & DADR_CFG));
+
+ /* udc module is now ready */
+}
+
+void imx_udc_config(struct imx_udc_struct *imx_usb)
+{
+ u8 ep_conf[5];
+ u8 i, j, cfg;
+ struct imx_ep_struct *imx_ep;
+
+ /* wait for the CFG bit to assert */
+ do {} while (!(__raw_readl(imx_usb->base + USB_DADR) & DADR_CFG));
+
+ /* Download the endpoint buffer for endpoint 0. */
+ for (j = 0; j < 5; j++) {
+ i = (j == 2 ? imx_usb->imx_ep[0].fifosize : 0x00);
+ __raw_writeb(i, imx_usb->base + USB_DDAT);
+ do {} while (__raw_readl(imx_usb->base + USB_DADR) & DADR_BSY);
+ }
+
+ /* Download the endpoint buffers for endpoints 1-5.
+ * We specify two configurations, one interface
+ */
+ for (cfg = 1; cfg < 3; cfg++) {
+ for (i = 1; i < IMX_USB_NB_EP; i++) {
+ imx_ep = &imx_usb->imx_ep[i];
+ /* EP no | Config no */
+ ep_conf[0] = (i << 4) | (cfg << 2);
+ /* Type | Direction */
+ ep_conf[1] = (imx_ep->bmAttributes << 3) |
+ (EP_DIR(imx_ep) << 2);
+ /* Max packet size */
+ ep_conf[2] = imx_ep->fifosize;
+ /* TRXTYP */
+ ep_conf[3] = 0xC0;
+ /* FIFO no */
+ ep_conf[4] = i;
+
+ D_INI(imx_usb->dev,
+ "<%s> ep%d_conf[%d]:"
+ "[%02x-%02x-%02x-%02x-%02x]\n",
+ __func__, i, cfg,
+ ep_conf[0], ep_conf[1], ep_conf[2],
+ ep_conf[3], ep_conf[4]);
+
+ for (j = 0; j < 5; j++) {
+ __raw_writeb(ep_conf[j],
+ imx_usb->base + USB_DDAT);
+ do {} while (__raw_readl(imx_usb->base + USB_DADR)
+ & DADR_BSY);
+ }
+ }
+ }
+
+ /* wait for the CFG bit to clear */
+ do {} while (__raw_readl(imx_usb->base + USB_DADR) & DADR_CFG);
+}
+
+void imx_udc_init_irq(struct imx_udc_struct *imx_usb)
+{
+ int i;
+
+ /* Mask and clear all irqs */
+ __raw_writel(0xFFFFFFFF, imx_usb->base + USB_MASK);
+ __raw_writel(0xFFFFFFFF, imx_usb->base + USB_INTR);
+ for (i = 0; i < IMX_USB_NB_EP; i++) {
+ __raw_writel(0x1FF, imx_usb->base + USB_EP_MASK(i));
+ __raw_writel(0x1FF, imx_usb->base + USB_EP_INTR(i));
+ }
+
+ /* Enable USB irqs */
+ __raw_writel(INTR_MSOF | INTR_FRAME_MATCH, imx_usb->base + USB_MASK);
+
+ /* Enable EP0 irqs */
+ __raw_writel(0x1FF & ~(EPINTR_DEVREQ | EPINTR_MDEVREQ | EPINTR_EOT
+ | EPINTR_EOF | EPINTR_FIFO_EMPTY | EPINTR_FIFO_FULL),
+ imx_usb->base + USB_EP_MASK(0));
+}
+
+void imx_udc_init_ep(struct imx_udc_struct *imx_usb)
+{
+ int i, max, temp;
+ struct imx_ep_struct *imx_ep;
+ for (i = 0; i < IMX_USB_NB_EP; i++) {
+ imx_ep = &imx_usb->imx_ep[i];
+ switch (imx_ep->fifosize) {
+ case 8:
+ max = 0;
+ break;
+ case 16:
+ max = 1;
+ break;
+ case 32:
+ max = 2;
+ break;
+ case 64:
+ max = 3;
+ break;
+ default:
+ max = 1;
+ break;
+ }
+ temp = (EP_DIR(imx_ep) << 7) | (max << 5)
+ | (imx_ep->bmAttributes << 3);
+ __raw_writel(temp, imx_usb->base + USB_EP_STAT(i));
+ __raw_writel(temp | EPSTAT_FLUSH, imx_usb->base + USB_EP_STAT(i));
+ D_INI(imx_usb->dev, "<%s> ep%d_stat %08x\n", __func__, i,
+ __raw_readl(imx_usb->base + USB_EP_STAT(i)));
+ }
+}
+
+void imx_udc_init_fifo(struct imx_udc_struct *imx_usb)
+{
+ int i, temp;
+ struct imx_ep_struct *imx_ep;
+ for (i = 0; i < IMX_USB_NB_EP; i++) {
+ imx_ep = &imx_usb->imx_ep[i];
+
+ /* Fifo control */
+ temp = EP_DIR(imx_ep) ? 0x0B000000 : 0x0F000000;
+ __raw_writel(temp, imx_usb->base + USB_EP_FCTRL(i));
+ D_INI(imx_usb->dev, "<%s> ep%d_fctrl %08x\n", __func__, i,
+ __raw_readl(imx_usb->base + USB_EP_FCTRL(i)));
+
+ /* Fifo alarm */
+ temp = (i ? imx_ep->fifosize / 2 : 0);
+ __raw_writel(temp, imx_usb->base + USB_EP_FALRM(i));
+ D_INI(imx_usb->dev, "<%s> ep%d_falrm %08x\n", __func__, i,
+ __raw_readl(imx_usb->base + USB_EP_FALRM(i)));
+ }
+}
+
+static void imx_udc_init(struct imx_udc_struct *imx_usb)
+{
+ /* Reset UDC */
+ imx_udc_reset(imx_usb);
+
+ /* Download the configuration to the endpoint buffer */
+ imx_udc_config(imx_usb);
+
+ /* Set up interrupts */
+ imx_udc_init_irq(imx_usb);
+
+ /* Set up endpoints */
+ imx_udc_init_ep(imx_usb);
+
+ /* Set up FIFOs */
+ imx_udc_init_fifo(imx_usb);
+}
+
+void imx_ep_irq_enable(struct imx_ep_struct *imx_ep)
+{
+
+ int i = EP_NO(imx_ep);
+
+ __raw_writel(0x1FF, imx_ep->imx_usb->base + USB_EP_MASK(i));
+ __raw_writel(0x1FF, imx_ep->imx_usb->base + USB_EP_INTR(i));
+ __raw_writel(0x1FF & ~(EPINTR_EOT | EPINTR_EOF),
+ imx_ep->imx_usb->base + USB_EP_MASK(i));
+}
+
+void imx_ep_irq_disable(struct imx_ep_struct *imx_ep)
+{
+
+ int i = EP_NO(imx_ep);
+
+ __raw_writel(0x1FF, imx_ep->imx_usb->base + USB_EP_MASK(i));
+ __raw_writel(0x1FF, imx_ep->imx_usb->base + USB_EP_INTR(i));
+}
+
+int imx_ep_empty(struct imx_ep_struct *imx_ep)
+{
+ struct imx_udc_struct *imx_usb = imx_ep->imx_usb;
+
+ return __raw_readl(imx_usb->base + USB_EP_FSTAT(EP_NO(imx_ep)))
+ & FSTAT_EMPTY;
+}
+
+unsigned imx_fifo_bcount(struct imx_ep_struct *imx_ep)
+{
+ struct imx_udc_struct *imx_usb = imx_ep->imx_usb;
+
+ return (__raw_readl(imx_usb->base + USB_EP_STAT(EP_NO(imx_ep)))
+ & EPSTAT_BCOUNT) >> 16;
+}
+
+void imx_flush(struct imx_ep_struct *imx_ep)
+{
+ struct imx_udc_struct *imx_usb = imx_ep->imx_usb;
+
+ int temp = __raw_readl(imx_usb->base + USB_EP_STAT(EP_NO(imx_ep)));
+ __raw_writel(temp | EPSTAT_FLUSH,
+ imx_usb->base + USB_EP_STAT(EP_NO(imx_ep)));
+}
+
+void imx_ep_stall(struct imx_ep_struct *imx_ep)
+{
+ struct imx_udc_struct *imx_usb = imx_ep->imx_usb;
+ int temp, i;
+
+ D_ERR(imx_usb->dev, "<%s> Forced stall on %s\n", __func__, imx_ep->ep.name);
+
+ imx_flush(imx_ep);
+
+ /* Special care for ep0 */
+ if (EP_NO(imx_ep)) {
+ temp = __raw_readl(imx_usb->base + USB_CTRL);
+ __raw_writel(temp | CTRL_CMDOVER | CTRL_CMDERROR, imx_usb->base + USB_CTRL);
+ do { } while (__raw_readl(imx_usb->base + USB_CTRL) & CTRL_CMDOVER);
+ temp = __raw_readl(imx_usb->base + USB_CTRL);
+ __raw_writel(temp & ~CTRL_CMDERROR, imx_usb->base + USB_CTRL);
+ }
+ else {
+ temp = __raw_readl(imx_usb->base + USB_EP_STAT(EP_NO(imx_ep)));
+ __raw_writel(temp | EPSTAT_STALL,
+ imx_usb->base + USB_EP_STAT(EP_NO(imx_ep)));
+
+ for (i = 0; i < 100; i ++) {
+ temp = __raw_readl(imx_usb->base + USB_EP_STAT(EP_NO(imx_ep)));
+ if (!(temp & EPSTAT_STALL))
+ break;
+ udelay(20);
+ }
+ if (i == 100)
+ D_ERR(imx_usb->dev, "<%s> Stall did not finish on %s\n",
+ __func__, imx_ep->ep.name);
+ }
+}
+
+static int imx_udc_get_frame(struct usb_gadget *_gadget)
+{
+ struct imx_udc_struct *imx_usb = container_of(_gadget,
+ struct imx_udc_struct, gadget);
+
+ return __raw_readl(imx_usb->base + USB_FRAME) & 0x7FF;
+}
+
+static int imx_udc_wakeup(struct usb_gadget *_gadget)
+{
+ return 0;
+}
+
+/*******************************************************************************
+ * USB request control functions
+ *******************************************************************************
+ */
+
+static void ep_add_request(struct imx_ep_struct *imx_ep, struct imx_request *req)
+{
+ if (unlikely(!req))
+ return;
+
+ req->in_use = 1;
+ list_add_tail(&req->queue, &imx_ep->queue);
+}
+
+static void ep_del_request(struct imx_ep_struct *imx_ep, struct imx_request *req)
+{
+ if (unlikely(!req))
+ return;
+
+ list_del_init(&req->queue);
+ req->in_use = 0;
+}
+
+static void done(struct imx_ep_struct *imx_ep, struct imx_request *req, int status)
+{
+ ep_del_request(imx_ep, req);
+
+ if (likely(req->req.status == -EINPROGRESS))
+ req->req.status = status;
+ else
+ status = req->req.status;
+
+ if (status && status != -ESHUTDOWN)
+ D_ERR(imx_ep->imx_usb->dev,
+ "<%s> complete %s req %p stat %d len %u/%u\n", __func__,
+ imx_ep->ep.name, &req->req, status,
+ req->req.actual, req->req.length);
+
+ req->req.complete(&imx_ep->ep, &req->req);
+}
+
+static void nuke(struct imx_ep_struct *imx_ep, int status)
+{
+ struct imx_request *req;
+
+ while (!list_empty(&imx_ep->queue)) {
+ req = list_entry(imx_ep->queue.next, struct imx_request, queue);
+ done(imx_ep, req, status);
+ }
+}
+
+/*******************************************************************************
+ * Data transfer over USB functions
+ *******************************************************************************
+ */
+static int read_packet(struct imx_ep_struct *imx_ep, struct imx_request *req)
+{
+ u8 *buf;
+ int bytes_ep, bufferspace, count, i;
+
+ bytes_ep = imx_fifo_bcount(imx_ep);
+ bufferspace = req->req.length - req->req.actual;
+
+ buf = req->req.buf + req->req.actual;
+ prefetchw(buf);
+
+ if (unlikely(imx_ep_empty(imx_ep)))
+ count = 0; /* zlp */
+ else
+ count = min(bytes_ep, bufferspace);
+
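+ /* The FIFO is read one byte at a time through the FDAT0 data register */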
+ for (i = count; i > 0; i--)
+ *buf++ = __raw_readb(imx_ep->imx_usb->base
+ + USB_EP_FDAT0(EP_NO(imx_ep)));
+ req->req.actual += count;
+
+ return count;
+}
+
+static int write_packet(struct imx_ep_struct *imx_ep, struct imx_request *req)
+{
+ u8 *buf;
+ int length, count, temp;
+
+ buf = req->req.buf + req->req.actual;
+ prefetch(buf);
+
+ length = min(req->req.length - req->req.actual, (u32)imx_ep->fifosize);
+
+ if (imx_fifo_bcount(imx_ep) + length > imx_ep->fifosize) {
+ D_TRX(imx_ep->imx_usb->dev, "<%s> packet overfill %s fifo\n",
+ __func__, imx_ep->ep.name);
+ return -1;
+ }
+
+ req->req.actual += length;
+ count = length;
+
+ if (!count && req->req.zero) { /* zlp */
+ temp = __raw_readl(imx_ep->imx_usb->base
+ + USB_EP_STAT(EP_NO(imx_ep)));
+ __raw_writel(temp | EPSTAT_ZLPS, imx_ep->imx_usb->base
+ + USB_EP_STAT(EP_NO(imx_ep)));
+ D_TRX(imx_ep->imx_usb->dev, "<%s> zero packet\n", __func__);
+ return 0;
+ }
+
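+ /* Write byte by byte; FCTRL_WFR is set before the last byte to close the frame */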
+ while (count--) {
+ if (count == 0) { /* last byte */
+ temp = __raw_readl(imx_ep->imx_usb->base
+ + USB_EP_FCTRL(EP_NO(imx_ep)));
+ __raw_writel(temp | FCTRL_WFR, imx_ep->imx_usb->base
+ + USB_EP_FCTRL(EP_NO(imx_ep)));
+ }
+ __raw_writeb(*buf++,
+ imx_ep->imx_usb->base + USB_EP_FDAT0(EP_NO(imx_ep)));
+ }
+
+ return length;
+}
+
+static int read_fifo(struct imx_ep_struct *imx_ep, struct imx_request *req)
+{
+ int bytes = 0,
+ count,
+ completed = 0;
+
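+ /* Drain frame-ready packets until a short packet arrives or the request buffer is full */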
+ while (__raw_readl(imx_ep->imx_usb->base + USB_EP_FSTAT(EP_NO(imx_ep)))
+ & FSTAT_FR) {
+ count = read_packet(imx_ep, req);
+ bytes += count;
+
+ completed = (count != imx_ep->fifosize);
+ if (completed || req->req.actual == req->req.length) {
+ completed = 1;
+ break;
+ }
+ }
+
+ if (completed || !req->req.length) {
+ done(imx_ep, req, 0);
+ D_REQ(imx_ep->imx_usb->dev, "<%s> %s req<%p> %s\n",
+ __func__, imx_ep->ep.name, req,
+ completed ? "completed" : "not completed");
+ if (!EP_NO(imx_ep))
+ ep0_chg_stat(__func__, imx_ep->imx_usb, EP0_IDLE);
+ }
+
+ D_TRX(imx_ep->imx_usb->dev, "<%s> bytes read: %d\n", __func__, bytes);
+
+ return completed;
+}
+
+static int write_fifo(struct imx_ep_struct *imx_ep, struct imx_request *req)
+{
+ int bytes = 0,
+ count,
+ completed = 0;
+
+ while (!completed) {
+ count = write_packet(imx_ep, req);
+ if (count < 0)
+ break; /* busy */
+ bytes += count;
+
+ /* last packet "must be" short (or a zlp) */
+ completed = (count != imx_ep->fifosize);
+
+ if (unlikely(completed)) {
+ done(imx_ep, req, 0);
+ D_REQ(imx_ep->imx_usb->dev, "<%s> %s req<%p> %s\n",
+ __func__, imx_ep->ep.name, req,
+ completed ? "completed" : "not completed");
+ if (!EP_NO(imx_ep))
+ ep0_chg_stat(__func__, imx_ep->imx_usb, EP0_IDLE);
+ }
+ }
+
+ D_TRX(imx_ep->imx_usb->dev, "<%s> bytes sent: %d\n", __func__, bytes);
+
+ return completed;
+}
+
+/*******************************************************************************
+ * Endpoint handlers
+ *******************************************************************************
+ */
+static int handle_ep(struct imx_ep_struct *imx_ep)
+{
+ struct imx_request *req;
+ int completed = 0;
+
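+ /* Keep servicing the queue while requests complete, so back-to-back requests are handled in one pass */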
+ do {
+ if (!list_empty(&imx_ep->queue))
+ req = list_entry(imx_ep->queue.next,
+ struct imx_request, queue);
+ else {
+ D_REQ(imx_ep->imx_usb->dev, "<%s> no request on %s\n",
+ __func__, imx_ep->ep.name);
+ return 0;
+ }
+
+ if (EP_DIR(imx_ep)) /* to host */
+ completed = write_fifo(imx_ep, req);
+ else /* to device */
+ completed = read_fifo(imx_ep, req);
+
+ dump_ep_stat(__func__, imx_ep);
+
+ } while (completed);
+
+ return 0;
+}
+
+static int handle_ep0(struct imx_ep_struct *imx_ep)
+{
+ struct imx_request *req = NULL;
+ int ret = 0;
+
+ if (!list_empty(&imx_ep->queue))
+ req = list_entry(imx_ep->queue.next, struct imx_request, queue);
+
+ if (req) {
+ switch (imx_ep->imx_usb->ep0state) {
+
+ case EP0_IN_DATA_PHASE: /* GET_DESCRIPTOR */
+ write_fifo(imx_ep, req);
+ break;
+ case EP0_OUT_DATA_PHASE: /* SET_DESCRIPTOR */
+ read_fifo(imx_ep, req);
+ break;
+ default:
+ D_EP0(imx_ep->imx_usb->dev,
+ "<%s> ep0 i/o, odd state %d\n",
+ __func__, imx_ep->imx_usb->ep0state);
+ ep_del_request(imx_ep, req);
+ ret = -EL2HLT;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static void handle_ep0_devreq(struct imx_udc_struct *imx_usb)
+{
+ struct imx_ep_struct *imx_ep = &imx_usb->imx_ep[0];
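+ /* Overlay the raw FIFO words with a usb_ctrlrequest so the SETUP packet can be decoded in place */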
+ union {
+ struct usb_ctrlrequest r;
+ u8 raw[8];
+ u32 word[2];
+ } u;
+ int temp, i;
+
+ nuke(imx_ep, -EPROTO);
+
+ /* read SETUP packet */
+ for (i = 0; i < 2; i++) {
+ if (imx_ep_empty(imx_ep)) {
+ D_ERR(imx_usb->dev,
+ "<%s> no setup packet received\n", __func__);
+ goto stall;
+ }
+ u.word[i] = __raw_readl(imx_usb->base + USB_EP_FDAT(EP_NO(imx_ep)));
+ }
+
+ temp = imx_ep_empty(imx_ep);
+ while (!imx_ep_empty(imx_ep)) {
+ i = __raw_readl(imx_usb->base + USB_EP_FDAT(EP_NO(imx_ep)));
+ D_ERR(imx_usb->dev,
+ "<%s> wrong to have extra bytes for setup : 0x%08x\n",
+ __func__, i);
+ }
+ if (!temp)
+ goto stall;
+
+ le16_to_cpus(&u.r.wValue);
+ le16_to_cpus(&u.r.wIndex);
+ le16_to_cpus(&u.r.wLength);
+
+ D_REQ(imx_usb->dev, "<%s> SETUP %02x.%02x v%04x i%04x l%04x\n",
+ __func__, u.r.bRequestType, u.r.bRequest,
+ u.r.wValue, u.r.wIndex, u.r.wLength);
+
+ if (imx_usb->set_config) {
+ /* NACK the host by using CMDOVER */
+ temp = __raw_readl(imx_usb->base + USB_CTRL);
+ __raw_writel(temp | CTRL_CMDOVER, imx_usb->base + USB_CTRL);
+
+ D_ERR(imx_usb->dev,
+ "<%s> set config req is pending, NACK the host\n",
+ __func__);
+ return;
+ }
+
+ if (u.r.bRequestType & USB_DIR_IN)
+ ep0_chg_stat(__func__, imx_usb, EP0_IN_DATA_PHASE);
+ else
+ ep0_chg_stat(__func__, imx_usb, EP0_OUT_DATA_PHASE);
+
+ i = imx_usb->driver->setup(&imx_usb->gadget, &u.r);
+ if (i < 0) {
+ D_ERR(imx_usb->dev, "<%s> device setup error %d\n",
+ __func__, i);
+ goto stall;
+ }
+
+ return;
+stall:
+ D_ERR(imx_usb->dev, "<%s> protocol STALL\n", __func__);
+ imx_ep_stall(imx_ep);
+ ep0_chg_stat(__func__, imx_usb, EP0_STALL);
+ return;
+}
+
+/*******************************************************************************
+ * USB gadget callback functions
+ *******************************************************************************
+ */
+
+static int imx_ep_enable(struct usb_ep *usb_ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ struct imx_ep_struct *imx_ep = container_of(usb_ep,
+ struct imx_ep_struct, ep);
+ struct imx_udc_struct *imx_usb = imx_ep->imx_usb;
+ unsigned long flags;
+
+ if (!usb_ep
+ || !desc
+ || !EP_NO(imx_ep)
+ || desc->bDescriptorType != USB_DT_ENDPOINT
+ || imx_ep->bEndpointAddress != desc->bEndpointAddress) {
+ D_ERR(imx_usb->dev,
+ "<%s> bad ep or descriptor\n", __func__);
+ return -EINVAL;
+ }
+
+ if (imx_ep->bmAttributes != desc->bmAttributes) {
+ D_ERR(imx_usb->dev,
+ "<%s> %s type mismatch\n", __func__, usb_ep->name);
+ return -EINVAL;
+ }
+
+ if (imx_ep->fifosize < le16_to_cpu(desc->wMaxPacketSize)) {
+ D_ERR(imx_usb->dev,
+ "<%s> bad %s maxpacket\n", __func__, usb_ep->name);
+ return -ERANGE;
+ }
+
+ if (!imx_usb->driver || imx_usb->gadget.speed == USB_SPEED_UNKNOWN) {
+ D_ERR(imx_usb->dev, "<%s> bogus device state\n", __func__);
+ return -ESHUTDOWN;
+ }
+
+ local_irq_save(flags);
+
+ imx_ep->stopped = 0;
+ imx_flush(imx_ep);
+ imx_ep_irq_enable(imx_ep);
+
+ local_irq_restore(flags);
+
+ D_EPX(imx_usb->dev, "<%s> ENABLED %s\n", __func__, usb_ep->name);
+ return 0;
+}
+
+static int imx_ep_disable(struct usb_ep *usb_ep)
+{
+ struct imx_ep_struct *imx_ep = container_of(usb_ep,
+ struct imx_ep_struct, ep);
+ unsigned long flags;
+
+ if (!usb_ep || !EP_NO(imx_ep) || !list_empty(&imx_ep->queue)) {
+ D_ERR(imx_ep->imx_usb->dev, "<%s> %s can not be disabled\n",
+ __func__, usb_ep ? imx_ep->ep.name : NULL);
+ return -EINVAL;
+ }
+
+ local_irq_save(flags);
+
+ imx_ep->stopped = 1;
+ nuke(imx_ep, -ESHUTDOWN);
+ imx_flush(imx_ep);
+ imx_ep_irq_disable(imx_ep);
+
+ local_irq_restore(flags);
+
+ D_EPX(imx_ep->imx_usb->dev,
+ "<%s> DISABLED %s\n", __func__, usb_ep->name);
+ return 0;
+}
+
+static struct usb_request *imx_ep_alloc_request
+ (struct usb_ep *usb_ep, gfp_t gfp_flags)
+{
+ struct imx_request *req;
+
+ if (!usb_ep)
+ return NULL;
+
+ req = kzalloc(sizeof *req, gfp_flags);
+ if (!req)
+ return NULL;
+
+ INIT_LIST_HEAD(&req->queue);
+ req->in_use = 0;
+
+ return &req->req;
+}
+
+static void imx_ep_free_request
+ (struct usb_ep *usb_ep, struct usb_request *usb_req)
+{
+ struct imx_request *req;
+
+ req = container_of(usb_req, struct imx_request, req);
+ WARN_ON(!list_empty(&req->queue));
+ kfree(req);
+}
+
+static int imx_ep_queue
+ (struct usb_ep *usb_ep, struct usb_request *usb_req, gfp_t gfp_flags)
+{
+ struct imx_ep_struct *imx_ep;
+ struct imx_udc_struct *imx_usb;
+ struct imx_request *req;
+ unsigned long flags;
+ int ret = 0;
+
+ imx_ep = container_of(usb_ep, struct imx_ep_struct, ep);
+ imx_usb = imx_ep->imx_usb;
+ req = container_of(usb_req, struct imx_request, req);
+
+ /*
+  * Special care on i.MX UDC.
+  * Ignore the ep0 enqueue that follows a set-configuration request
+  * from the host. This assumes all gadget drivers reply to set
+  * configuration with their next ep0 request enqueue.
+  */
+ if (imx_usb->set_config && !EP_NO(imx_ep)) {
+ imx_usb->set_config = 0;
+ D_EPX(imx_usb->dev,
+ "<%s> gadget reply set config\n", __func__);
+ return 0;
+ }
+
+ if (unlikely(!usb_req || !req || !usb_req->complete || !usb_req->buf)) {
+ D_ERR(imx_usb->dev, "<%s> bad params\n", __func__);
+ return -EINVAL;
+ }
+
+ if (unlikely(!usb_ep || !imx_ep)) {
+ D_ERR(imx_usb->dev, "<%s> bad ep\n", __func__);
+ return -EINVAL;
+ }
+
+ if (!imx_usb->driver || imx_usb->gadget.speed == USB_SPEED_UNKNOWN) {
+ D_ERR(imx_usb->dev, "<%s> bogus device state\n", __func__);
+ return -ESHUTDOWN;
+ }
+
+ local_irq_save(flags);
+
+ /* Debug */
+ D_REQ(imx_usb->dev, "<%s> ep%d %s request for [%d] bytes\n",
+ __func__, EP_NO(imx_ep),
+ ((!EP_NO(imx_ep) && imx_ep->imx_usb->ep0state == EP0_IN_DATA_PHASE)
+ || (EP_NO(imx_ep) && EP_DIR(imx_ep))) ? "IN" : "OUT", usb_req->length);
+ dump_req(__func__, imx_ep, usb_req);
+
+ if (imx_ep->stopped) {
+ usb_req->status = -ESHUTDOWN;
+ ret = -ESHUTDOWN;
+ goto out;
+ }
+
+ if (req->in_use) {
+ D_ERR(imx_usb->dev,
+ "<%s> refusing to queue req %p (already queued)\n",
+ __func__, req);
+ goto out;
+ }
+
+ usb_req->status = -EINPROGRESS;
+ usb_req->actual = 0;
+
+ ep_add_request(imx_ep, req);
+
+ if (!EP_NO(imx_ep))
+ ret = handle_ep0(imx_ep);
+ else
+ ret = handle_ep(imx_ep);
+out:
+ local_irq_restore(flags);
+ return ret;
+}
+
+static int imx_ep_dequeue(struct usb_ep *usb_ep, struct usb_request *usb_req)
+{
+
+ struct imx_ep_struct *imx_ep = container_of
+ (usb_ep, struct imx_ep_struct, ep);
+ struct imx_request *req;
+ unsigned long flags;
+
+ if (unlikely(!usb_ep || !EP_NO(imx_ep))) {
+ D_ERR(imx_ep->imx_usb->dev, "<%s> bad ep\n", __func__);
+ return -EINVAL;
+ }
+
+ local_irq_save(flags);
+
+ /* make sure it's actually queued on this endpoint */
+ list_for_each_entry(req, &imx_ep->queue, queue) {
+ if (&req->req == usb_req)
+ break;
+ }
+ if (&req->req != usb_req) {
+ local_irq_restore(flags);
+ return -EINVAL;
+ }
+
+ done(imx_ep, req, -ECONNRESET);
+
+ local_irq_restore(flags);
+ return 0;
+}
+
+static int imx_ep_set_halt(struct usb_ep *usb_ep, int value)
+{
+ struct imx_ep_struct *imx_ep = container_of
+ (usb_ep, struct imx_ep_struct, ep);
+ unsigned long flags;
+
+ if (unlikely(!usb_ep || !EP_NO(imx_ep))) {
+ D_ERR(imx_ep->imx_usb->dev, "<%s> bad ep\n", __func__);
+ return -EINVAL;
+ }
+
+ local_irq_save(flags);
+
+ if ((imx_ep->bEndpointAddress & USB_DIR_IN)
+ && !list_empty(&imx_ep->queue)) {
+ local_irq_restore(flags);
+ return -EAGAIN;
+ }
+
+ imx_ep_stall(imx_ep);
+
+ local_irq_restore(flags);
+
+ D_EPX(imx_ep->imx_usb->dev, "<%s> %s halt\n", __func__, usb_ep->name);
+ return 0;
+}
+
+static int imx_ep_fifo_status(struct usb_ep *usb_ep)
+{
+ struct imx_ep_struct *imx_ep = container_of
+ (usb_ep, struct imx_ep_struct, ep);
+
+ if (!usb_ep) {
+ D_ERR(imx_ep->imx_usb->dev, "<%s> bad ep\n", __func__);
+ return -ENODEV;
+ }
+
+ if (imx_ep->imx_usb->gadget.speed == USB_SPEED_UNKNOWN)
+ return 0;
+ else
+ return imx_fifo_bcount(imx_ep);
+}
+
+static void imx_ep_fifo_flush(struct usb_ep *usb_ep)
+{
+ struct imx_ep_struct *imx_ep = container_of
+ (usb_ep, struct imx_ep_struct, ep);
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ if (!usb_ep || !EP_NO(imx_ep) || !list_empty(&imx_ep->queue)) {
+ D_ERR(imx_ep->imx_usb->dev, "<%s> bad ep\n", __func__);
+ local_irq_restore(flags);
+ return;
+ }
+
+ /* toggle and halt bits stay unchanged */
+ imx_flush(imx_ep);
+
+ local_irq_restore(flags);
+}
+
+static struct usb_ep_ops imx_ep_ops = {
+ .enable = imx_ep_enable,
+ .disable = imx_ep_disable,
+
+ .alloc_request = imx_ep_alloc_request,
+ .free_request = imx_ep_free_request,
+
+ .queue = imx_ep_queue,
+ .dequeue = imx_ep_dequeue,
+
+ .set_halt = imx_ep_set_halt,
+ .fifo_status = imx_ep_fifo_status,
+ .fifo_flush = imx_ep_fifo_flush,
+};
+
+/*******************************************************************************
+ * USB endpoint control functions
+ *******************************************************************************
+ */
+
+void ep0_chg_stat(const char *label,
+ struct imx_udc_struct *imx_usb, enum ep0_state stat)
+{
+ D_EP0(imx_usb->dev, "<%s> from %15s to %15s\n",
+ label, state_name[imx_usb->ep0state], state_name[stat]);
+
+ if (imx_usb->ep0state == stat)
+ return;
+
+ imx_usb->ep0state = stat;
+}
+
+static void usb_init_data(struct imx_udc_struct *imx_usb)
+{
+ struct imx_ep_struct *imx_ep;
+ u8 i;
+
+ /* device/ep0 records init */
+ INIT_LIST_HEAD(&imx_usb->gadget.ep_list);
+ INIT_LIST_HEAD(&imx_usb->gadget.ep0->ep_list);
+ ep0_chg_stat(__func__, imx_usb, EP0_IDLE);
+
+ /* basic endpoint records init */
+ for (i = 0; i < IMX_USB_NB_EP; i++) {
+ imx_ep = &imx_usb->imx_ep[i];
+
+ if (i) {
+ list_add_tail(&imx_ep->ep.ep_list,
+ &imx_usb->gadget.ep_list);
+ imx_ep->stopped = 1;
+ } else
+ imx_ep->stopped = 0;
+
+ INIT_LIST_HEAD(&imx_ep->queue);
+ }
+}
+
+static void udc_stop_activity(struct imx_udc_struct *imx_usb,
+ struct usb_gadget_driver *driver)
+{
+ struct imx_ep_struct *imx_ep;
+ int i;
+
+ if (imx_usb->gadget.speed == USB_SPEED_UNKNOWN)
+ driver = NULL;
+
+ /* prevent new request submissions, kill any outstanding requests */
+ for (i = 1; i < IMX_USB_NB_EP; i++) {
+ imx_ep = &imx_usb->imx_ep[i];
+ imx_flush(imx_ep);
+ imx_ep->stopped = 1;
+ imx_ep_irq_disable(imx_ep);
+ nuke(imx_ep, -ESHUTDOWN);
+ }
+
+ imx_usb->cfg = 0;
+ imx_usb->intf = 0;
+ imx_usb->alt = 0;
+
+ if (driver)
+ driver->disconnect(&imx_usb->gadget);
+}
+
+/*******************************************************************************
+ * Interrupt handlers
+ *******************************************************************************
+ */
+
+static irqreturn_t imx_udc_irq(int irq, void *dev)
+{
+ struct imx_udc_struct *imx_usb = dev;
+ struct usb_ctrlrequest u;
+ int temp, cfg, intf, alt;
+ int intr = __raw_readl(imx_usb->base + USB_INTR);
+
+ if (intr & (INTR_WAKEUP | INTR_SUSPEND | INTR_RESUME | INTR_RESET_START
+ | INTR_RESET_STOP | INTR_CFG_CHG)) {
+ dump_intr(__func__, intr, imx_usb->dev);
+ dump_usb_stat(__func__, imx_usb);
+ }
+
+ if (!imx_usb->driver) {
+ /*imx_udc_disable(imx_usb);*/
+ goto end_irq;
+ }
+
+ if (intr & INTR_WAKEUP) {
+ if (imx_usb->gadget.speed == USB_SPEED_UNKNOWN
+ && imx_usb->driver && imx_usb->driver->resume)
+ imx_usb->driver->resume(&imx_usb->gadget);
+ imx_usb->set_config = 0;
+ imx_usb->gadget.speed = USB_SPEED_FULL;
+ }
+
+ if (intr & INTR_SUSPEND) {
+ if (imx_usb->gadget.speed != USB_SPEED_UNKNOWN
+ && imx_usb->driver && imx_usb->driver->suspend)
+ imx_usb->driver->suspend(&imx_usb->gadget);
+ imx_usb->set_config = 0;
+ imx_usb->gadget.speed = USB_SPEED_UNKNOWN;
+ }
+
+ if (intr & INTR_RESET_START) {
+ __raw_writel(intr, imx_usb->base + USB_INTR);
+ udc_stop_activity(imx_usb, imx_usb->driver);
+ imx_usb->set_config = 0;
+ imx_usb->gadget.speed = USB_SPEED_UNKNOWN;
+ }
+
+ if (intr & INTR_RESET_STOP)
+ imx_usb->gadget.speed = USB_SPEED_FULL;
+
+ if (intr & INTR_CFG_CHG) {
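+ /* The requested configuration/interface is reported in USB_STAT;
+  * synthesize the matching SET_CONFIGURATION/SET_INTERFACE control
+  * requests so the gadget driver still sees them.
+  */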
+ __raw_writel(INTR_CFG_CHG, imx_usb->base + USB_INTR);
+ temp = __raw_readl(imx_usb->base + USB_STAT);
+ cfg = (temp & STAT_CFG) >> 5;
+ intf = (temp & STAT_INTF) >> 3;
+ alt = temp & STAT_ALTSET;
+
+ D_REQ(imx_usb->dev,
+ "<%s> orig config C=%d, I=%d, A=%d / "
+ "req config C=%d, I=%d, A=%d\n",
+ __func__, imx_usb->cfg, imx_usb->intf, imx_usb->alt,
+ cfg, intf, alt);
+
+ if (cfg != 1 && cfg != 2)
+ goto end_irq;
+
+ imx_usb->set_config = 0;
+
+ /* Config setup */
+ if (imx_usb->cfg != cfg) {
+ D_REQ(imx_usb->dev, "<%s> Change config start\n",__func__);
+ u.bRequest = USB_REQ_SET_CONFIGURATION;
+ u.bRequestType = USB_DIR_OUT |
+ USB_TYPE_STANDARD |
+ USB_RECIP_DEVICE;
+ u.wValue = cfg;
+ u.wIndex = 0;
+ u.wLength = 0;
+ imx_usb->cfg = cfg;
+ imx_usb->set_config = 1;
+ imx_usb->driver->setup(&imx_usb->gadget, &u);
+ imx_usb->set_config = 0;
+ D_REQ(imx_usb->dev, "<%s> Change config done\n",__func__);
+
+ }
+ if (imx_usb->intf != intf || imx_usb->alt != alt) {
+ D_REQ(imx_usb->dev, "<%s> Change interface start\n",__func__);
+ u.bRequest = USB_REQ_SET_INTERFACE;
+ u.bRequestType = USB_DIR_OUT |
+ USB_TYPE_STANDARD |
+ USB_RECIP_INTERFACE;
+ u.wValue = alt;
+ u.wIndex = intf;
+ u.wLength = 0;
+ imx_usb->intf = intf;
+ imx_usb->alt = alt;
+ imx_usb->set_config = 1;
+ imx_usb->driver->setup(&imx_usb->gadget, &u);
+ imx_usb->set_config = 0;
+ D_REQ(imx_usb->dev, "<%s> Change interface done\n",__func__);
+ }
+ }
+
+ if (intr & INTR_SOF) {
+ if (imx_usb->ep0state == EP0_IDLE) {
+ temp = __raw_readl(imx_usb->base + USB_CTRL);
+ __raw_writel(temp | CTRL_CMDOVER, imx_usb->base + USB_CTRL);
+ }
+ }
+
+end_irq:
+ __raw_writel(intr, imx_usb->base + USB_INTR);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t imx_udc_ctrl_irq(int irq, void *dev)
+{
+ struct imx_udc_struct *imx_usb = dev;
+ int intr = __raw_readl(imx_usb->base + USB_EP_INTR(0));
+
+ dump_ep_intr(__func__, 0, intr, imx_usb->dev);
+
+ if (!imx_usb->driver) {
+ __raw_writel(intr, imx_usb->base + USB_EP_INTR(0));
+ return IRQ_HANDLED;
+ }
+
+ /* DEVREQ IRQ has highest priority */
+ if (intr & (EPINTR_DEVREQ | EPINTR_MDEVREQ))
+ handle_ep0_devreq(imx_usb);
+ /* The i.MX seems to miss the EOF interrupt sometimes.
+  * Therefore we monitor both EOF and FIFO_EMPTY interrupts
+  * when transmitting, and both EOF and FIFO_FULL when
+  * receiving data.
+  */
+ else if (intr & (EPINTR_EOF | EPINTR_FIFO_EMPTY | EPINTR_FIFO_FULL))
+ handle_ep0(&imx_usb->imx_ep[0]);
+
+ __raw_writel(intr, imx_usb->base + USB_EP_INTR(0));
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t imx_udc_bulk_irq(int irq, void *dev)
+{
+ struct imx_udc_struct *imx_usb = dev;
+ struct imx_ep_struct *imx_ep = &imx_usb->imx_ep[irq - USBD_INT0];
+ int intr = __raw_readl(imx_usb->base + USB_EP_INTR(EP_NO(imx_ep)));
+
+ dump_ep_intr(__func__, irq - USBD_INT0, intr, imx_usb->dev);
+
+ if (!imx_usb->driver) {
+ __raw_writel(intr, imx_usb->base + USB_EP_INTR(EP_NO(imx_ep)));
+ return IRQ_HANDLED;
+ }
+
+ handle_ep(imx_ep);
+
+ __raw_writel(intr, imx_usb->base + USB_EP_INTR(EP_NO(imx_ep)));
+
+ return IRQ_HANDLED;
+}
+
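+/* IRQ index 0 is the ep0 control interrupt, 1-5 the data endpoints, anything else the device-level interrupt */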
+irq_handler_t intr_handler(int i)
+{
+ switch (i) {
+ case 0:
+ return imx_udc_ctrl_irq;
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ case 5:
+ return imx_udc_bulk_irq;
+ default:
+ return imx_udc_irq;
+ }
+}
+
+/*******************************************************************************
+ * Static defined IMX UDC structure
+ *******************************************************************************
+ */
+
+static const struct usb_gadget_ops imx_udc_ops = {
+ .get_frame = imx_udc_get_frame,
+ .wakeup = imx_udc_wakeup,
+};
+
+static struct imx_udc_struct controller = {
+ .gadget = {
+ .ops = &imx_udc_ops,
+ .ep0 = &controller.imx_ep[0].ep,
+ .name = driver_name,
+ .dev = {
+ .bus_id = "gadget",
+ },
+ },
+
+ .imx_ep[0] = {
+ .ep = {
+ .name = ep0name,
+ .ops = &imx_ep_ops,
+ .maxpacket = 32,
+ },
+ .imx_usb = &controller,
+ .fifosize = 32,
+ .bEndpointAddress = 0,
+ .bmAttributes = USB_ENDPOINT_XFER_CONTROL,
+ },
+ .imx_ep[1] = {
+ .ep = {
+ .name = "ep1in-bulk",
+ .ops = &imx_ep_ops,
+ .maxpacket = 64,
+ },
+ .imx_usb = &controller,
+ .fifosize = 64,
+ .bEndpointAddress = USB_DIR_IN | 1,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ },
+ .imx_ep[2] = {
+ .ep = {
+ .name = "ep2out-bulk",
+ .ops = &imx_ep_ops,
+ .maxpacket = 64,
+ },
+ .imx_usb = &controller,
+ .fifosize = 64,
+ .bEndpointAddress = USB_DIR_OUT | 2,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ },
+ .imx_ep[3] = {
+ .ep = {
+ .name = "ep3out-bulk",
+ .ops = &imx_ep_ops,
+ .maxpacket = 32,
+ },
+ .imx_usb = &controller,
+ .fifosize = 32,
+ .bEndpointAddress = USB_DIR_OUT | 3,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ },
+ .imx_ep[4] = {
+ .ep = {
+ .name = "ep4in-int",
+ .ops = &imx_ep_ops,
+ .maxpacket = 32,
+ },
+ .imx_usb = &controller,
+ .fifosize = 32,
+ .bEndpointAddress = USB_DIR_IN | 4,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ },
+ .imx_ep[5] = {
+ .ep = {
+ .name = "ep5out-int",
+ .ops = &imx_ep_ops,
+ .maxpacket = 32,
+ },
+ .imx_usb = &controller,
+ .fifosize = 32,
+ .bEndpointAddress = USB_DIR_OUT | 5,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ },
+};
+
+/*******************************************************************************
+ * USB gadget driver functions
+ *******************************************************************************
+ */
+int usb_gadget_register_driver(struct usb_gadget_driver *driver)
+{
+ struct imx_udc_struct *imx_usb = &controller;
+ int retval;
+
+ if (!driver
+ || driver->speed < USB_SPEED_FULL
+ || !driver->bind
+ || !driver->disconnect
+ || !driver->setup)
+ return -EINVAL;
+ if (!imx_usb)
+ return -ENODEV;
+ if (imx_usb->driver)
+ return -EBUSY;
+
+ /* first hook up the driver ... */
+ imx_usb->driver = driver;
+ imx_usb->gadget.dev.driver = &driver->driver;
+
+ retval = device_add(&imx_usb->gadget.dev);
+ if (retval)
+ goto fail;
+ retval = driver->bind(&imx_usb->gadget);
+ if (retval) {
+ D_ERR(imx_usb->dev, "<%s> bind to driver %s --> error %d\n",
+ __func__, driver->driver.name, retval);
+ device_del(&imx_usb->gadget.dev);
+
+ goto fail;
+ }
+
+ D_INI(imx_usb->dev, "<%s> registered gadget driver '%s'\n",
+ __func__, driver->driver.name);
+
+ imx_udc_enable(imx_usb);
+
+ return 0;
+fail:
+ imx_usb->driver = NULL;
+ imx_usb->gadget.dev.driver = NULL;
+ return retval;
+}
+EXPORT_SYMBOL(usb_gadget_register_driver);
+
+int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
+{
+ struct imx_udc_struct *imx_usb = &controller;
+
+ if (!imx_usb)
+ return -ENODEV;
+ if (!driver || driver != imx_usb->driver || !driver->unbind)
+ return -EINVAL;
+
+ udc_stop_activity(imx_usb, driver);
+ imx_udc_disable(imx_usb);
+
+ driver->unbind(&imx_usb->gadget);
+ imx_usb->gadget.dev.driver = NULL;
+ imx_usb->driver = NULL;
+
+ device_del(&imx_usb->gadget.dev);
+
+ D_INI(imx_usb->dev, "<%s> unregistered gadget driver '%s'\n",
+ __func__, driver->driver.name);
+
+ return 0;
+}
+EXPORT_SYMBOL(usb_gadget_unregister_driver);
+
+/*******************************************************************************
+ * Module functions
+ *******************************************************************************
+ */
+
+static int __init imx_udc_probe(struct platform_device *pdev)
+{
+ struct imx_udc_struct *imx_usb = &controller;
+ struct resource *res;
+ struct imxusb_platform_data *pdata;
+ struct clk *clk;
+ void __iomem *base;
+ int ret = 0;
+ int i, res_size;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "can't get device resources\n");
+ return -ENODEV;
+ }
+
+ pdata = pdev->dev.platform_data;
+ if (!pdata) {
+ dev_err(&pdev->dev, "driver needs platform data\n");
+ return -ENODEV;
+ }
+
+ res_size = resource_size(res);
+ if (!request_mem_region(res->start, res_size, res->name)) {
+ dev_err(&pdev->dev, "can't allocate %d bytes at %d address\n",
+ res_size, res->start);
+ return -ENOMEM;
+ }
+
+ if (pdata->init) {
+ ret = pdata->init(&pdev->dev);
+ if (ret)
+ goto fail0;
+ }
+
+ base = ioremap(res->start, res_size);
+ if (!base) {
+ dev_err(&pdev->dev, "ioremap failed\n");
+ ret = -EIO;
+ goto fail1;
+ }
+
+ clk = clk_get(NULL, "usbd_clk");
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ dev_err(&pdev->dev, "can't get USB clock\n");
+ goto fail2;
+ }
+ clk_enable(clk);
+
+ if (clk_get_rate(clk) != 48000000) {
+ D_INI(&pdev->dev,
+ "Bad USB clock (%d Hz), changing to 48000000 Hz\n",
+ (int)clk_get_rate(clk));
+ if (clk_set_rate(clk, 48000000)) {
+ dev_err(&pdev->dev,
+ "Unable to set correct USB clock (48MHz)\n");
+ ret = -EIO;
+ goto fail3;
+ }
+ }
+
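+ /* One IRQ per endpoint plus one device-level IRQ, IMX_USB_NB_EP + 1 in total */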
+ for (i = 0; i < IMX_USB_NB_EP + 1; i++) {
+ imx_usb->usbd_int[i] = platform_get_irq(pdev, i);
+ if (imx_usb->usbd_int[i] < 0) {
+ dev_err(&pdev->dev, "can't get irq number\n");
+ ret = -ENODEV;
+ goto fail3;
+ }
+ }
+
+ for (i = 0; i < IMX_USB_NB_EP + 1; i++) {
+ ret = request_irq(imx_usb->usbd_int[i], intr_handler(i),
+ IRQF_DISABLED, driver_name, imx_usb);
+ if (ret) {
+ dev_err(&pdev->dev, "can't get irq %i, err %d\n",
+ imx_usb->usbd_int[i], ret);
+ for (--i; i >= 0; i--)
+ free_irq(imx_usb->usbd_int[i], imx_usb);
+ goto fail3;
+ }
+ }
+
+ imx_usb->res = res;
+ imx_usb->base = base;
+ imx_usb->clk = clk;
+ imx_usb->dev = &pdev->dev;
+
+ device_initialize(&imx_usb->gadget.dev);
+
+ imx_usb->gadget.dev.parent = &pdev->dev;
+ imx_usb->gadget.dev.dma_mask = pdev->dev.dma_mask;
+
+ platform_set_drvdata(pdev, imx_usb);
+
+ usb_init_data(imx_usb);
+ imx_udc_init(imx_usb);
+
+ return 0;
+
+fail3:
+ clk_disable(clk);
+ clk_put(clk);
+fail2:
+ iounmap(base);
+fail1:
+ if (pdata->exit)
+ pdata->exit(&pdev->dev);
+fail0:
+ release_mem_region(res->start, res_size);
+ return ret;
+}
+
+static int __exit imx_udc_remove(struct platform_device *pdev)
+{
+ struct imx_udc_struct *imx_usb = platform_get_drvdata(pdev);
+ struct imxusb_platform_data *pdata = pdev->dev.platform_data;
+ int i;
+
+ imx_udc_disable(imx_usb);
+
+ for (i = 0; i < IMX_USB_NB_EP + 1; i++)
+ free_irq(imx_usb->usbd_int[i], imx_usb);
+
+ clk_disable(imx_usb->clk);
+ clk_put(imx_usb->clk);
+ iounmap(imx_usb->base);
+
+ release_mem_region(imx_usb->res->start, resource_size(imx_usb->res));
+
+ if (pdata->exit)
+ pdata->exit(&pdev->dev);
+
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+/*----------------------------------------------------------------------------*/
+
+/* Suspend and resume are not implemented yet */
+#define imx_udc_suspend NULL
+#define imx_udc_resume NULL
+
+/*----------------------------------------------------------------------------*/
+
+static struct platform_driver udc_driver = {
+ .driver = {
+ .name = driver_name,
+ .owner = THIS_MODULE,
+ },
+ .remove = __exit_p(imx_udc_remove),
+ .suspend = imx_udc_suspend,
+ .resume = imx_udc_resume,
+};
+
+static int __init udc_init(void)
+{
+ return platform_driver_probe(&udc_driver, imx_udc_probe);
+}
+module_init(udc_init);
+
+static void __exit udc_exit(void)
+{
+ platform_driver_unregister(&udc_driver);
+}
+module_exit(udc_exit);
+
+MODULE_DESCRIPTION("IMX USB Device Controller driver");
+MODULE_AUTHOR("Darius Augulis ");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:imx_udc");
diff --git a/drivers/usb/gadget/imx_udc.h b/drivers/usb/gadget/imx_udc.h
new file mode 100644
index 00000000000..850076937d8
--- /dev/null
+++ b/drivers/usb/gadget/imx_udc.h
@@ -0,0 +1,344 @@
+/*
+ * Copyright (C) 2005 Mike Lee(eemike@gmail.com)
+ *
+ * This UDC driver is still under test and its code is based on pxa2xx_udc.h.
+ * Please use it at your own risk!
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LINUX_USB_GADGET_IMX_H
+#define __LINUX_USB_GADGET_IMX_H
+
+#include
+
+/* Helper macros */
+#define EP_NO(ep) ((ep->bEndpointAddress) & ~USB_DIR_IN) /* endpoint number */
+#define EP_DIR(ep) ((ep->bEndpointAddress) & USB_DIR_IN ? 1 : 0) /* IN:1, OUT:0 */
+#define irq_to_ep(irq) (((irq) >= USBD_INT0) && ((irq) <= USBD_INT6) ? ((irq) - USBD_INT0) : (USBD_INT6)) /* should not happen */
+#define ep_to_irq(ep) (EP_NO((ep)) + USBD_INT0)
+#define IMX_USB_NB_EP 6
+
+/* Driver structures */
+struct imx_request {
+ struct usb_request req;
+ struct list_head queue;
+ unsigned int in_use;
+};
+
+enum ep0_state {
+ EP0_IDLE,
+ EP0_IN_DATA_PHASE,
+ EP0_OUT_DATA_PHASE,
+ EP0_CONFIG,
+ EP0_STALL,
+};
+
+struct imx_ep_struct {
+ struct usb_ep ep;
+ struct imx_udc_struct *imx_usb;
+ struct list_head queue;
+ unsigned char stopped;
+ unsigned char fifosize;
+ unsigned char bEndpointAddress;
+ unsigned char bmAttributes;
+};
+
+struct imx_udc_struct {
+ struct usb_gadget gadget;
+ struct usb_gadget_driver *driver;
+ struct device *dev;
+ struct imx_ep_struct imx_ep[IMX_USB_NB_EP];
+ struct clk *clk;
+ enum ep0_state ep0state;
+ struct resource *res;
+ void __iomem *base;
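+ /* set while a synthesized SET_CONFIGURATION/SET_INTERFACE is being handled */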
+ unsigned char set_config;
+ int cfg,
+ intf,
+ alt,
+ usbd_int[7];
+};
+
+/* USB registers */
+#define USB_FRAME (0x00) /* USB frame */
+#define USB_SPEC (0x04) /* USB Spec */
+#define USB_STAT (0x08) /* USB Status */
+#define USB_CTRL (0x0C) /* USB Control */
+#define USB_DADR (0x10) /* USB Desc RAM addr */
+#define USB_DDAT (0x14) /* USB Desc RAM/EP buffer data */
+#define USB_INTR (0x18) /* USB interrupt */
+#define USB_MASK (0x1C) /* USB Mask */
+#define USB_ENAB (0x24) /* USB Enable */
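+/* Per-endpoint registers: each endpoint has its own 0x30-byte register window */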
+#define USB_EP_STAT(x) (0x30 + (x*0x30)) /* USB status/control */
+#define USB_EP_INTR(x) (0x34 + (x*0x30)) /* USB interrupt */
+#define USB_EP_MASK(x) (0x38 + (x*0x30)) /* USB mask */
+#define USB_EP_FDAT(x) (0x3C + (x*0x30)) /* USB FIFO data */
+#define USB_EP_FDAT0(x) (0x3C + (x*0x30)) /* USB FIFO data */
+#define USB_EP_FDAT1(x) (0x3D + (x*0x30)) /* USB FIFO data */
+#define USB_EP_FDAT2(x) (0x3E + (x*0x30)) /* USB FIFO data */
+#define USB_EP_FDAT3(x) (0x3F + (x*0x30)) /* USB FIFO data */
+#define USB_EP_FSTAT(x) (0x40 + (x*0x30)) /* USB FIFO status */
+#define USB_EP_FCTRL(x) (0x44 + (x*0x30)) /* USB FIFO control */
+#define USB_EP_LRFP(x) (0x48 + (x*0x30)) /* USB last read frame pointer */
+#define USB_EP_LWFP(x) (0x4C + (x*0x30)) /* USB last write frame pointer */
+#define USB_EP_FALRM(x) (0x50 + (x*0x30)) /* USB FIFO alarm */
+#define USB_EP_FRDP(x) (0x54 + (x*0x30)) /* USB FIFO read pointer */
+#define USB_EP_FWRP(x) (0x58 + (x*0x30)) /* USB FIFO write pointer */
+/* USB Control Register Bit Fields.*/
+#define CTRL_CMDOVER (1<<6) /* UDC status */
+#define CTRL_CMDERROR (1<<5) /* UDC status */
+#define CTRL_FE_ENA (1<<3) /* Enable Front End logic */
+#define CTRL_UDC_RST (1<<2) /* UDC reset */
+#define CTRL_AFE_ENA (1<<1) /* Analog Front End enable */
+#define CTRL_RESUME (1<<0) /* UDC resume */
+/* USB Status Register Bit Fields.*/
+#define STAT_RST (1<<8)
+#define STAT_SUSP (1<<7)
+#define STAT_CFG (3<<5)
+#define STAT_INTF (3<<3)
+#define STAT_ALTSET (7<<0)
+/* USB Interrupt Status/Mask Registers Bit fields */
+#define INTR_WAKEUP (1<<31) /* Wake up Interrupt */
+#define INTR_MSOF (1<<7) /* Missed Start of Frame */
+#define INTR_SOF (1<<6) /* Start of Frame */
+#define INTR_RESET_STOP (1<<5) /* Reset Signaling stop */
+#define INTR_RESET_START (1<<4) /* Reset Signaling start */
+#define INTR_RESUME (1<<3) /* Suspend to resume */
+#define INTR_SUSPEND (1<<2) /* Active to suspend */
+#define INTR_FRAME_MATCH (1<<1) /* Frame matched */
+#define INTR_CFG_CHG (1<<0) /* Configuration change occurred */
+/* USB Enable Register Bit Fields.*/
+#define ENAB_RST (1<<31) /* Reset USB modules */
+#define ENAB_ENAB (1<<30) /* Enable USB modules*/
+#define ENAB_SUSPEND (1<<29) /* Suspend USB modules */
+#define ENAB_ENDIAN (1<<28) /* Endian of USB modules */
+#define ENAB_PWRMD (1<<0) /* Power mode of USB modules */
+/* USB Descriptor Ram Address Register bit fields */
+#define DADR_CFG (1<<31) /* Configuration */
+#define DADR_BSY (1<<30) /* Busy status */
+#define DADR_DADR (0x1FF) /* Descriptor Ram Address */
+/* USB Descriptor RAM/Endpoint Buffer Data Register bit fields */
+#define DDAT_DDAT (0xFF) /* Descriptor Endpoint Buffer */
+/* USB Endpoint Status Register bit fields */
+#define EPSTAT_BCOUNT (0x7F<<16) /* Endpoint FIFO byte count */
+#define EPSTAT_SIP (1<<8) /* Endpoint setup in progress */
+#define EPSTAT_DIR (1<<7) /* Endpoint transfer direction */
+#define EPSTAT_MAX (3<<5) /* Endpoint Max packet size */
+#define EPSTAT_TYP (3<<3) /* Endpoint type */
+#define EPSTAT_ZLPS (1<<2) /* Send zero length packet */
+#define EPSTAT_FLUSH (1<<1) /* Endpoint FIFO Flush */
+#define EPSTAT_STALL (1<<0) /* Force stall */
+/* USB Endpoint FIFO Status Register bit fields */
+#define FSTAT_FRAME_STAT (0xF<<24) /* Frame status bit [0-3] */
+#define FSTAT_ERR (1<<22) /* FIFO error */
+#define FSTAT_UF (1<<21) /* FIFO underflow */
+#define FSTAT_OF (1<<20) /* FIFO overflow */
+#define FSTAT_FR (1<<19) /* FIFO frame ready */
+#define FSTAT_FULL (1<<18) /* FIFO full */
+#define FSTAT_ALRM (1<<17) /* FIFO alarm */
+#define FSTAT_EMPTY (1<<16) /* FIFO empty */
+/* USB Endpoint FIFO Control Register bit fields */
+#define FCTRL_WFR (1<<29) /* Write frame end */
+/* USB Endpoint Interrupt Status Register bit fields */
+#define EPINTR_FIFO_FULL (1<<8) /* fifo full */
+#define EPINTR_FIFO_EMPTY (1<<7) /* fifo empty */
+#define EPINTR_FIFO_ERROR (1<<6) /* fifo error */
+#define EPINTR_FIFO_HIGH (1<<5) /* fifo high */
+#define EPINTR_FIFO_LOW (1<<4) /* fifo low */
+#define EPINTR_MDEVREQ (1<<3) /* multi Device request */
+#define EPINTR_EOT (1<<2) /* fifo end of transfer */
+#define EPINTR_DEVREQ (1<<1) /* Device request */
+#define EPINTR_EOF (1<<0) /* fifo end of frame */
+
+/* Debug macros */
+#ifdef DEBUG
+
+/* #define DEBUG_REQ */
+/* #define DEBUG_TRX */
+/* #define DEBUG_INIT */
+/* #define DEBUG_EP0 */
+/* #define DEBUG_EPX */
+/* #define DEBUG_IRQ */
+/* #define DEBUG_EPIRQ */
+/* #define DEBUG_DUMP */
+#define DEBUG_ERR
+
+#ifdef DEBUG_REQ
+ #define D_REQ(dev, args...) dev_dbg(dev, ## args)
+#else
+ #define D_REQ(dev, args...) do {} while (0)
+#endif /* DEBUG_REQ */
+
+#ifdef DEBUG_TRX
+ #define D_TRX(dev, args...) dev_dbg(dev, ## args)
+#else
+ #define D_TRX(dev, args...) do {} while (0)
+#endif /* DEBUG_TRX */
+
+#ifdef DEBUG_INIT
+ #define D_INI(dev, args...) dev_dbg(dev, ## args)
+#else
+ #define D_INI(dev, args...) do {} while (0)
+#endif /* DEBUG_INIT */
+
+#ifdef DEBUG_EP0
+ static const char *state_name[] = {
+ "EP0_IDLE",
+ "EP0_IN_DATA_PHASE",
+ "EP0_OUT_DATA_PHASE",
+ "EP0_CONFIG",
+ "EP0_STALL"
+ };
+ #define D_EP0(dev, args...) dev_dbg(dev, ## args)
+#else
+ #define D_EP0(dev, args...) do {} while (0)
+#endif /* DEBUG_EP0 */
+
+#ifdef DEBUG_EPX
+ #define D_EPX(dev, args...) dev_dbg(dev, ## args)
+#else
+ #define D_EPX(dev, args...) do {} while (0)
+#endif /* DEBUG_EPX */
+
+#ifdef DEBUG_IRQ
+ static void dump_intr(const char *label, int irqreg, struct device *dev)
+ {
+ dev_dbg(dev, "<%s> USB_INTR=[%s%s%s%s%s%s%s%s%s]\n", label,
+ (irqreg & INTR_WAKEUP) ? " wake" : "",
+ (irqreg & INTR_MSOF) ? " msof" : "",
+ (irqreg & INTR_SOF) ? " sof" : "",
+ (irqreg & INTR_RESUME) ? " resume" : "",
+ (irqreg & INTR_SUSPEND) ? " suspend" : "",
+ (irqreg & INTR_RESET_STOP) ? " noreset" : "",
+ (irqreg & INTR_RESET_START) ? " reset" : "",
+ (irqreg & INTR_FRAME_MATCH) ? " fmatch" : "",
+ (irqreg & INTR_CFG_CHG) ? " config" : "");
+ }
+#else
+ #define dump_intr(x, y, z) do {} while (0)
+#endif /* DEBUG_IRQ */
+
+#ifdef DEBUG_EPIRQ
+ static void dump_ep_intr(const char *label, int nr, int irqreg, struct device *dev)
+ {
+ dev_dbg(dev, "<%s> EP%d_INTR=[%s%s%s%s%s%s%s%s%s]\n", label, nr,
+ (irqreg & EPINTR_FIFO_FULL) ? " full" : "",
+ (irqreg & EPINTR_FIFO_EMPTY) ? " fempty" : "",
+ (irqreg & EPINTR_FIFO_ERROR) ? " ferr" : "",
+ (irqreg & EPINTR_FIFO_HIGH) ? " fhigh" : "",
+ (irqreg & EPINTR_FIFO_LOW) ? " flow" : "",
+ (irqreg & EPINTR_MDEVREQ) ? " mreq" : "",
+ (irqreg & EPINTR_EOF) ? " eof" : "",
+ (irqreg & EPINTR_DEVREQ) ? " devreq" : "",
+ (irqreg & EPINTR_EOT) ? " eot" : "");
+ }
+#else
+ #define dump_ep_intr(x, y, z, i) do {} while (0)
+ #endif /* DEBUG_EPIRQ */
+
+#ifdef DEBUG_DUMP
+ static void dump_usb_stat(const char *label, struct imx_udc_struct *imx_usb)
+ {
+ int temp = __raw_readl(imx_usb->base + USB_STAT);
+
+ dev_dbg(imx_usb->dev,
+ "<%s> USB_STAT=[%s%s CFG=%d, INTF=%d, ALTR=%d]\n", label,
+ (temp & STAT_RST) ? " reset" : "",
+ (temp & STAT_SUSP) ? " suspend" : "",
+ (temp & STAT_CFG) >> 5,
+ (temp & STAT_INTF) >> 3,
+ (temp & STAT_ALTSET));
+ }
+
+ static void dump_ep_stat(const char *label, struct imx_ep_struct *imx_ep)
+ {
+ int temp = __raw_readl(imx_ep->imx_usb->base + USB_EP_INTR(EP_NO(imx_ep)));
+
+ dev_dbg(imx_ep->imx_usb->dev,
+ "<%s> EP%d_INTR=[%s%s%s%s%s%s%s%s%s]\n", label, EP_NO(imx_ep),
+ (temp & EPINTR_FIFO_FULL) ? " full" : "",
+ (temp & EPINTR_FIFO_EMPTY) ? " fempty" : "",
+ (temp & EPINTR_FIFO_ERROR) ? " ferr" : "",
+ (temp & EPINTR_FIFO_HIGH) ? " fhigh" : "",
+ (temp & EPINTR_FIFO_LOW) ? " flow" : "",
+ (temp & EPINTR_MDEVREQ) ? " mreq" : "",
+ (temp & EPINTR_EOF) ? " eof" : "",
+ (temp & EPINTR_DEVREQ) ? " devreq" : "",
+ (temp & EPINTR_EOT) ? " eot" : "");
+
+ temp = __raw_readl(imx_ep->imx_usb->base + USB_EP_STAT(EP_NO(imx_ep)));
+
+ dev_dbg(imx_ep->imx_usb->dev,
+ "<%s> EP%d_STAT=[%s%s bcount=%d]\n", label, EP_NO(imx_ep),
+ (temp & EPSTAT_SIP) ? " sip" : "",
+ (temp & EPSTAT_STALL) ? " stall" : "",
+ (temp & EPSTAT_BCOUNT) >> 16);
+
+ temp = __raw_readl(imx_ep->imx_usb->base + USB_EP_FSTAT(EP_NO(imx_ep)));
+
+ dev_dbg(imx_ep->imx_usb->dev,
+ "<%s> EP%d_FSTAT=[%s%s%s%s%s%s%s]\n", label, EP_NO(imx_ep),
+ (temp & FSTAT_ERR) ? " ferr" : "",
+ (temp & FSTAT_UF) ? " funder" : "",
+ (temp & FSTAT_OF) ? " fover" : "",
+ (temp & FSTAT_FR) ? " fready" : "",
+ (temp & FSTAT_FULL) ? " ffull" : "",
+ (temp & FSTAT_ALRM) ? " falarm" : "",
+ (temp & FSTAT_EMPTY) ? " fempty" : "");
+ }
+
+ static void dump_req(const char *label, struct imx_ep_struct *imx_ep, struct usb_request *req)
+ {
+ int i;
+
+ if (!req || !req->buf) {
+ dev_dbg(imx_ep->imx_usb->dev, "<%s> req or req buf is free\n", label);
+ return;
+ }
+
+ if ((!EP_NO(imx_ep) && imx_ep->imx_usb->ep0state == EP0_IN_DATA_PHASE)
+ || (EP_NO(imx_ep) && EP_DIR(imx_ep))) {
+
+ dev_dbg(imx_ep->imx_usb->dev, "<%s> request dump <", label);
+ for (i = 0; i < req->length; i++)
+ printk("%02x-", *((u8 *)req->buf + i));
+ printk(">\n");
+ }
+ }
+
+#else
+ #define dump_ep_stat(x, y) do {} while (0)
+ #define dump_usb_stat(x, y) do {} while (0)
+ #define dump_req(x, y, z) do {} while (0)
+#endif /* DEBUG_DUMP */
+
+#ifdef DEBUG_ERR
+ #define D_ERR(dev, args...) dev_dbg(dev, ## args)
+#else
+ #define D_ERR(dev, args...) do {} while (0)
+#endif
+
+#else
+ #define D_REQ(dev, args...) do {} while (0)
+ #define D_TRX(dev, args...) do {} while (0)
+ #define D_INI(dev, args...) do {} while (0)
+ #define D_EP0(dev, args...) do {} while (0)
+ #define D_EPX(dev, args...) do {} while (0)
+ #define dump_ep_intr(x, y, z, i) do {} while (0)
+ #define dump_intr(x, y, z) do {} while (0)
+ #define dump_ep_stat(x, y) do {} while (0)
+ #define dump_usb_stat(x, y) do {} while (0)
+ #define dump_req(x, y, z) do {} while (0)
+ #define D_ERR(dev, args...) do {} while (0)
+#endif /* DEBUG */
+
+#endif /* __LINUX_USB_GADGET_IMX_H */
diff --git a/drivers/usb/gadget/m66592-udc.c b/drivers/usb/gadget/m66592-udc.c
index 3a8879ec206..43dcf9e1af6 100644
--- a/drivers/usb/gadget/m66592-udc.c
+++ b/drivers/usb/gadget/m66592-udc.c
@@ -1546,8 +1546,6 @@ static void nop_completion(struct usb_ep *ep, struct usb_request *r)
{
}
-#define resource_len(r) (((r)->end - (r)->start) + 1)
-
static int __init m66592_probe(struct platform_device *pdev)
{
struct resource *res;
@@ -1560,11 +1558,10 @@ static int __init m66592_probe(struct platform_device *pdev)
int ret = 0;
int i;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- (char *)udc_name);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
ret = -ENODEV;
- pr_err("platform_get_resource_byname error.\n");
+ pr_err("platform_get_resource error.\n");
goto clean_up;
}
@@ -1575,7 +1572,7 @@ static int __init m66592_probe(struct platform_device *pdev)
goto clean_up;
}
- reg = ioremap(res->start, resource_len(res));
+ reg = ioremap(res->start, resource_size(res));
if (reg == NULL) {
ret = -ENOMEM;
pr_err("ioremap error.\n");
diff --git a/drivers/usb/gadget/net2280.c b/drivers/usb/gadget/net2280.c
index 8ae70de2c37..12c6d83b218 100644
--- a/drivers/usb/gadget/net2280.c
+++ b/drivers/usb/gadget/net2280.c
@@ -669,7 +669,7 @@ fill_dma_desc (struct net2280_ep *ep, struct net2280_request *req, int valid)
/* 2280 may be polling VALID_BIT through ep->dma->dmadesc */
wmb ();
- td->dmacount = cpu_to_le32p (&dmacount);
+ td->dmacount = cpu_to_le32(dmacount);
}
static const u32 dmactl_default =
diff --git a/drivers/usb/gadget/omap_udc.c b/drivers/usb/gadget/omap_udc.c
index 34e9e393f92..57d9641c6bf 100644
--- a/drivers/usb/gadget/omap_udc.c
+++ b/drivers/usb/gadget/omap_udc.c
@@ -3006,7 +3006,7 @@ cleanup1:
cleanup0:
if (xceiv)
- put_device(xceiv->dev);
+ otg_put_transceiver(xceiv);
if (cpu_is_omap16xx() || cpu_is_omap24xx()) {
clk_disable(hhc_clk);
@@ -3034,7 +3034,7 @@ static int __exit omap_udc_remove(struct platform_device *pdev)
pullup_disable(udc);
if (udc->transceiver) {
- put_device(udc->transceiver->dev);
+ otg_put_transceiver(udc->transceiver);
udc->transceiver = NULL;
}
omap_writew(0, UDC_SYSCON1);
diff --git a/drivers/usb/gadget/pxa25x_udc.c b/drivers/usb/gadget/pxa25x_udc.c
index 697a0ca349b..9b36205c575 100644
--- a/drivers/usb/gadget/pxa25x_udc.c
+++ b/drivers/usb/gadget/pxa25x_udc.c
@@ -2198,7 +2198,7 @@ static int __init pxa25x_udc_probe(struct platform_device *pdev)
udc_disable(dev);
udc_reinit(dev);
- dev->vbus = is_vbus_present();
+ dev->vbus = !!is_vbus_present();
/* irq setup after old hardware state is cleaned up */
retval = request_irq(irq, pxa25x_udc_irq,
diff --git a/drivers/usb/gadget/pxa27x_udc.c b/drivers/usb/gadget/pxa27x_udc.c
index 65110d02a20..990f40f988d 100644
--- a/drivers/usb/gadget/pxa27x_udc.c
+++ b/drivers/usb/gadget/pxa27x_udc.c
@@ -430,7 +430,6 @@ static void pio_irq_enable(struct pxa_ep *ep)
/**
* pio_irq_disable - Disables irq generation for one endpoint
* @ep: udc endpoint
- * @index: endpoint number
*/
static void pio_irq_disable(struct pxa_ep *ep)
{
@@ -586,7 +585,6 @@ static void inc_ep_stats_reqs(struct pxa_ep *ep, int is_in)
* inc_ep_stats_bytes - Update ep stats counts
* @ep: physical endpoint
* @count: bytes transfered on endpoint
- * @req: usb request
* @is_in: ep direction (USB_DIR_IN or 0)
*/
static void inc_ep_stats_bytes(struct pxa_ep *ep, int count, int is_in)
diff --git a/drivers/usb/gadget/s3c2410_udc.c b/drivers/usb/gadget/s3c2410_udc.c
index c7e25563680..9a2b8920532 100644
--- a/drivers/usb/gadget/s3c2410_udc.c
+++ b/drivers/usb/gadget/s3c2410_udc.c
@@ -36,6 +36,7 @@
#include
#include
#include
+#include
#include
#include
@@ -51,7 +52,6 @@
#include
#include
-#include
#include
#include
@@ -1510,11 +1510,7 @@ static irqreturn_t s3c2410_udc_vbus_irq(int irq, void *_dev)
dprintk(DEBUG_NORMAL, "%s()\n", __func__);
- /* some cpus cannot read from an line configured to IRQ! */
- s3c2410_gpio_cfgpin(udc_info->vbus_pin, S3C2410_GPIO_INPUT);
- value = s3c2410_gpio_getpin(udc_info->vbus_pin);
- s3c2410_gpio_cfgpin(udc_info->vbus_pin, S3C2410_GPIO_SFN2);
-
+ value = gpio_get_value(udc_info->vbus_pin) ? 1 : 0;
if (udc_info->vbus_pin_inverted)
value = !value;
@@ -1802,7 +1798,7 @@ static int s3c2410_udc_probe(struct platform_device *pdev)
struct s3c2410_udc *udc = &memory;
struct device *dev = &pdev->dev;
int retval;
- unsigned int irq;
+ int irq;
dev_dbg(dev, "%s()\n", __func__);
@@ -1861,7 +1857,7 @@ static int s3c2410_udc_probe(struct platform_device *pdev)
/* irq setup after old hardware state is cleaned up */
retval = request_irq(IRQ_USBD, s3c2410_udc_irq,
- IRQF_DISABLED, gadget_name, udc);
+ IRQF_DISABLED, gadget_name, udc);
if (retval != 0) {
dev_err(dev, "cannot get irq %i, err %d\n", IRQ_USBD, retval);
@@ -1872,17 +1868,28 @@ static int s3c2410_udc_probe(struct platform_device *pdev)
dev_dbg(dev, "got irq %i\n", IRQ_USBD);
if (udc_info && udc_info->vbus_pin > 0) {
- irq = s3c2410_gpio_getirq(udc_info->vbus_pin);
+ retval = gpio_request(udc_info->vbus_pin, "udc vbus");
+ if (retval < 0) {
+ dev_err(dev, "cannot claim vbus pin\n");
+ goto err_int;
+ }
+
+ irq = gpio_to_irq(udc_info->vbus_pin);
+ if (irq < 0) {
+ dev_err(dev, "no irq for gpio vbus pin\n");
+ goto err_gpio_claim;
+ }
+
retval = request_irq(irq, s3c2410_udc_vbus_irq,
IRQF_DISABLED | IRQF_TRIGGER_RISING
| IRQF_TRIGGER_FALLING | IRQF_SHARED,
gadget_name, udc);
if (retval != 0) {
- dev_err(dev, "can't get vbus irq %i, err %d\n",
+ dev_err(dev, "can't get vbus irq %d, err %d\n",
irq, retval);
retval = -EBUSY;
- goto err_int;
+ goto err_gpio_claim;
}
dev_dbg(dev, "got irq %i\n", irq);
@@ -1902,6 +1909,9 @@ static int s3c2410_udc_probe(struct platform_device *pdev)
return 0;
+err_gpio_claim:
+ if (udc_info && udc_info->vbus_pin > 0)
+ gpio_free(udc_info->vbus_pin);
err_int:
free_irq(IRQ_USBD, udc);
err_map:
@@ -1927,7 +1937,7 @@ static int s3c2410_udc_remove(struct platform_device *pdev)
debugfs_remove(udc->regs_info);
if (udc_info && udc_info->vbus_pin > 0) {
- irq = s3c2410_gpio_getirq(udc_info->vbus_pin);
+ irq = gpio_to_irq(udc_info->vbus_pin);
free_irq(irq, udc);
}
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index f3a75a929e0..2b476b6b3d4 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -96,6 +96,19 @@ config USB_EHCI_HCD_PPC_OF
Enables support for the USB controller present on the PowerPC
OpenFirmware platform bus.
+config USB_OXU210HP_HCD
+ tristate "OXU210HP HCD support"
+ depends on USB
+ ---help---
+ The OXU210HP is a USB host/OTG/device controller. Enable this
+ option if your board has this chip. If unsure, say N.
+
+ This driver does not support isochronous transfers and does not
+ implement the OTG or device controller functions.
+
+ To compile this driver as a module, choose M here: the
+ module will be called oxu210hp-hcd.
+
config USB_ISP116X_HCD
tristate "ISP116X HCD support"
depends on USB
diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile
index 23be2222404..e5f3f20787e 100644
--- a/drivers/usb/host/Makefile
+++ b/drivers/usb/host/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_USB_WHCI_HCD) += whci/
obj-$(CONFIG_PCI) += pci-quirks.o
obj-$(CONFIG_USB_EHCI_HCD) += ehci-hcd.o
+obj-$(CONFIG_USB_OXU210HP_HCD) += oxu210hp-hcd.o
obj-$(CONFIG_USB_ISP116X_HCD) += isp116x-hcd.o
obj-$(CONFIG_USB_OHCI_HCD) += ohci-hcd.o
obj-$(CONFIG_USB_UHCI_HCD) += uhci-hcd.o
diff --git a/drivers/usb/host/ehci-dbg.c b/drivers/usb/host/ehci-dbg.c
index 0cb53ca8d34..7f4ace73d44 100644
--- a/drivers/usb/host/ehci-dbg.c
+++ b/drivers/usb/host/ehci-dbg.c
@@ -455,9 +455,7 @@ static void qh_lines (
(scratch >> 16) & 0x7fff,
scratch,
td->urb);
- if (temp < 0)
- temp = 0;
- else if (size < temp)
+ if (size < temp)
temp = size;
size -= temp;
next += temp;
@@ -466,9 +464,7 @@ static void qh_lines (
}
temp = snprintf (next, size, "\n");
- if (temp < 0)
- temp = 0;
- else if (size < temp)
+ if (size < temp)
temp = size;
size -= temp;
next += temp;
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index 218f9660d7e..97a53a48a3d 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -194,6 +194,7 @@ static int ehci_bus_resume (struct usb_hcd *hcd)
u32 temp;
u32 power_okay;
int i;
+ u8 resume_needed = 0;
if (time_before (jiffies, ehci->next_statechange))
msleep(5);
@@ -228,7 +229,9 @@ static int ehci_bus_resume (struct usb_hcd *hcd)
/* Some controller/firmware combinations need a delay during which
* they set up the port statuses. See Bugzilla #8190. */
- mdelay(8);
+ spin_unlock_irq(&ehci->lock);
+ msleep(8);
+ spin_lock_irq(&ehci->lock);
/* manually resume the ports we suspended during bus_suspend() */
i = HCS_N_PORTS (ehci->hcs_params);
@@ -236,12 +239,21 @@ static int ehci_bus_resume (struct usb_hcd *hcd)
temp = ehci_readl(ehci, &ehci->regs->port_status [i]);
temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS);
if (test_bit(i, &ehci->bus_suspended) &&
- (temp & PORT_SUSPEND))
+ (temp & PORT_SUSPEND)) {
temp |= PORT_RESUME;
+ resume_needed = 1;
+ }
ehci_writel(ehci, temp, &ehci->regs->port_status [i]);
}
+
+ /* msleep for 20ms only if we actually need to resume a port */
+ if (resume_needed) {
+ spin_unlock_irq(&ehci->lock);
+ msleep(20);
+ spin_lock_irq(&ehci->lock);
+ }
+
i = HCS_N_PORTS (ehci->hcs_params);
- mdelay (20);
while (i--) {
temp = ehci_readl(ehci, &ehci->regs->port_status [i]);
if (test_bit(i, &ehci->bus_suspended) &&
@@ -422,8 +434,15 @@ static int check_reset_complete (
port_status &= ~PORT_RWC_BITS;
ehci_writel(ehci, port_status, status_reg);
- } else
+ /* ensure 440EPx ohci controller state is operational */
+ if (ehci->has_amcc_usb23)
+ set_ohci_hcfs(ehci, 1);
+ } else {
ehci_dbg (ehci, "port %d high speed\n", index + 1);
+ /* ensure 440EPx ohci controller state is suspended */
+ if (ehci->has_amcc_usb23)
+ set_ohci_hcfs(ehci, 0);
+ }
return port_status;
}
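
The bus-resume hunks above replace mdelay() busy-waits with msleep(), which can schedule and therefore must not run while ehci->lock is held; the patch brackets each sleep with spin_unlock_irq()/spin_lock_irq(). A hedged sketch of that pattern, assuming the caller entered with the lock taken via spin_lock_irq():

    #include <linux/spinlock.h>
    #include <linux/delay.h>

    /* Sketch: sleep while temporarily releasing a lock taken with
     * spin_lock_irq(). Anything read under the lock before the sleep must
     * be treated as stale once the lock is re-acquired.
     */
    static void sleep_unlocked(spinlock_t *lock, unsigned int msecs)
    {
        spin_unlock_irq(lock);
        msleep(msecs);        /* may schedule; never legal under the lock */
        spin_lock_irq(lock);
    }

In the resume path above this means the port status registers are re-read after each sleep rather than relying on values cached before it.
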
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index 36864f95844..bdc6e86e1f8 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -219,15 +219,19 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
/* Serial Bus Release Number is at PCI 0x60 offset */
pci_read_config_byte(pdev, 0x60, &ehci->sbrn);
- /* Workaround current PCI init glitch: wakeup bits aren't
- * being set from PCI PM capability.
+ /* Keep this around for a while just in case some EHCI
+ * implementation uses legacy PCI PM support. This test
+ * can be removed on 17 Dec 2009 if the dev_warn() hasn't
+ * been triggered by then.
*/
if (!device_can_wakeup(&pdev->dev)) {
u16 port_wake;
pci_read_config_word(pdev, 0x62, &port_wake);
- if (port_wake & 0x0001)
+ if (port_wake & 0x0001) {
+ dev_warn(&pdev->dev, "Enabling legacy PCI PM\n");
device_init_wakeup(&pdev->dev, 1);
+ }
}
#ifdef CONFIG_USB_SUSPEND
@@ -428,6 +432,8 @@ static struct pci_driver ehci_pci_driver = {
#ifdef CONFIG_PM
.suspend = usb_hcd_pci_suspend,
+ .suspend_late = usb_hcd_pci_suspend_late,
+ .resume_early = usb_hcd_pci_resume_early,
.resume = usb_hcd_pci_resume,
#endif
.shutdown = usb_hcd_pci_shutdown,
diff --git a/drivers/usb/host/ehci-ppc-of.c b/drivers/usb/host/ehci-ppc-of.c
index b018deed2e8..ef732b704f5 100644
--- a/drivers/usb/host/ehci-ppc-of.c
+++ b/drivers/usb/host/ehci-ppc-of.c
@@ -107,11 +107,13 @@ ehci_hcd_ppc_of_probe(struct of_device *op, const struct of_device_id *match)
{
struct device_node *dn = op->node;
struct usb_hcd *hcd;
- struct ehci_hcd *ehci;
+ struct ehci_hcd *ehci = NULL;
struct resource res;
int irq;
int rv;
+ struct device_node *np;
+
if (usb_disabled())
return -ENODEV;
@@ -149,6 +151,20 @@ ehci_hcd_ppc_of_probe(struct of_device *op, const struct of_device_id *match)
}
ehci = hcd_to_ehci(hcd);
+ np = of_find_compatible_node(NULL, NULL, "ibm,usb-ohci-440epx");
+ if (np != NULL) {
+ /* claim we are really affected by the usb23 erratum */
+ if (!of_address_to_resource(np, 0, &res))
+ ehci->ohci_hcctrl_reg = ioremap(res.start +
+ OHCI_HCCTRL_OFFSET, OHCI_HCCTRL_LEN);
+ else
+ pr_debug(__FILE__ ": no ohci offset in fdt\n");
+ if (!ehci->ohci_hcctrl_reg) {
+ pr_debug(__FILE__ ": ioremap for ohci hcctrl failed\n");
+ } else {
+ ehci->has_amcc_usb23 = 1;
+ }
+ }
if (of_get_property(dn, "big-endian", NULL)) {
ehci->big_endian_mmio = 1;
@@ -181,6 +197,9 @@ err_ioremap:
irq_dispose_mapping(irq);
err_irq:
release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+
+ if (ehci->has_amcc_usb23)
+ iounmap(ehci->ohci_hcctrl_reg);
err_rmr:
usb_put_hcd(hcd);
@@ -191,6 +210,11 @@ err_rmr:
static int ehci_hcd_ppc_of_remove(struct of_device *op)
{
struct usb_hcd *hcd = dev_get_drvdata(&op->dev);
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+
+ struct device_node *np;
+ struct resource res;
+
dev_set_drvdata(&op->dev, NULL);
dev_dbg(&op->dev, "stopping PPC-OF USB Controller\n");
@@ -201,6 +225,25 @@ static int ehci_hcd_ppc_of_remove(struct of_device *op)
irq_dispose_mapping(hcd->irq);
release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+ /* Use request_mem_region to test whether the ohci driver is loaded;
+ * if so, ensure the ohci core is operational.
+ */
+ if (ehci->has_amcc_usb23) {
+ np = of_find_compatible_node(NULL, NULL, "ibm,usb-ohci-440epx");
+ if (np != NULL) {
+ if (!of_address_to_resource(np, 0, &res))
+ if (!request_mem_region(res.start,
+ 0x4, hcd_name))
+ set_ohci_hcfs(ehci, 1);
+ else
+ release_mem_region(res.start, 0x4);
+ else
+ pr_debug(__FILE__ ": no ohci offset in fdt\n");
+ of_node_put(np);
+ }
+
+ iounmap(ehci->ohci_hcctrl_reg);
+ }
usb_put_hcd(hcd);
return 0;
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
index c7d4b5a06bd..fb7054ccf4f 100644
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -120,6 +120,16 @@ struct ehci_hcd { /* one per controller */
unsigned has_fsl_port_bug:1; /* FreeScale */
unsigned big_endian_mmio:1;
unsigned big_endian_desc:1;
+ unsigned has_amcc_usb23:1;
+
+ /* required for usb32 quirk */
+ #define OHCI_CTRL_HCFS (3 << 6)
+ #define OHCI_USB_OPER (2 << 6)
+ #define OHCI_USB_SUSPEND (3 << 6)
+
+ #define OHCI_HCCTRL_OFFSET 0x4
+ #define OHCI_HCCTRL_LEN 0x4
+ __hc32 *ohci_hcctrl_reg;
u8 sbrn; /* packed release number */
@@ -636,6 +646,30 @@ static inline void ehci_writel(const struct ehci_hcd *ehci,
#endif
}
+/*
+ * On certain ppc-44x SoCs there is a HW issue that can only be worked around
+ * with an explicit suspend/operate of the companion OHCI controller, so this
+ * function is only meaningful on that architecture. The other shared bits
+ * are gated on the has_amcc_usb23 quirk flag.
+ */
+#ifdef CONFIG_44x
+static inline void set_ohci_hcfs(struct ehci_hcd *ehci, int operational)
+{
+ u32 hc_control;
+
+ hc_control = (readl_be(ehci->ohci_hcctrl_reg) & ~OHCI_CTRL_HCFS);
+ if (operational)
+ hc_control |= OHCI_USB_OPER;
+ else
+ hc_control |= OHCI_USB_SUSPEND;
+
+ writel_be(hc_control, ehci->ohci_hcctrl_reg);
+ (void) readl_be(ehci->ohci_hcctrl_reg);
+}
+#else
+static inline void set_ohci_hcfs(struct ehci_hcd *ehci, int operational)
+{ }
+#endif
+
/*-------------------------------------------------------------------------*/
/*
diff --git a/drivers/usb/host/isp1760-hcd.c b/drivers/usb/host/isp1760-hcd.c
index 8017f1cf78e..b899f1a59c2 100644
--- a/drivers/usb/host/isp1760-hcd.c
+++ b/drivers/usb/host/isp1760-hcd.c
@@ -435,14 +435,13 @@ static int isp1760_hc_setup(struct usb_hcd *hcd)
/*
* PORT 1 Control register of the ISP1760 is the OTG control
- * register on ISP1761.
+ * register on ISP1761. Since there is no OTG or device controller
+ * support in this driver, we use port 1 as a "normal" USB host port on
+ * both chips.
*/
- if (!(priv->devflags & ISP1760_FLAG_ISP1761) &&
- !(priv->devflags & ISP1760_FLAG_PORT1_DIS)) {
- isp1760_writel(PORT1_POWER | PORT1_INIT2,
- hcd->regs + HC_PORT1_CTRL);
- mdelay(10);
- }
+ isp1760_writel(PORT1_POWER | PORT1_INIT2,
+ hcd->regs + HC_PORT1_CTRL);
+ mdelay(10);
priv->hcs_params = isp1760_readl(hcd->regs + HC_HCSPARAMS);
diff --git a/drivers/usb/host/isp1760-hcd.h b/drivers/usb/host/isp1760-hcd.h
index 4377277667d..a9daea58796 100644
--- a/drivers/usb/host/isp1760-hcd.h
+++ b/drivers/usb/host/isp1760-hcd.h
@@ -135,7 +135,6 @@ typedef void (packet_enqueue)(struct usb_hcd *hcd, struct isp1760_qh *qh,
* indicate the most "atypical" case, so that a devflags of 0 is
* a sane default configuration.
*/
-#define ISP1760_FLAG_PORT1_DIS 0x00000001 /* Port 1 disabled */
#define ISP1760_FLAG_BUS_WIDTH_16 0x00000002 /* 16-bit data bus width */
#define ISP1760_FLAG_OTG_EN 0x00000004 /* Port 1 supports OTG */
#define ISP1760_FLAG_ANALOG_OC 0x00000008 /* Analog overcurrent */
diff --git a/drivers/usb/host/isp1760-if.c b/drivers/usb/host/isp1760-if.c
index b87ca7cf4b3..4cf7ca428b3 100644
--- a/drivers/usb/host/isp1760-if.c
+++ b/drivers/usb/host/isp1760-if.c
@@ -60,9 +60,6 @@ static int of_isp1760_probe(struct of_device *dev,
if (of_device_is_compatible(dp, "nxp,usb-isp1761"))
devflags |= ISP1760_FLAG_ISP1761;
- if (of_get_property(dp, "port1-disable", NULL) != NULL)
- devflags |= ISP1760_FLAG_PORT1_DIS;
-
/* Some systems wire up only 16 of the 32 data lines */
prop = of_get_property(dp, "bus-width", NULL);
if (prop && *prop == 16)
@@ -129,23 +126,23 @@ static struct of_platform_driver isp1760_of_driver = {
#endif
#ifdef CONFIG_PCI
-static u32 nxp_pci_io_base;
-static u32 iolength;
-static u32 pci_mem_phy0;
-static u32 length;
-static u8 __iomem *chip_addr;
-static u8 __iomem *iobase;
-
static int __devinit isp1761_pci_probe(struct pci_dev *dev,
const struct pci_device_id *id)
{
u8 latency, limit;
__u32 reg_data;
int retry_count;
- int length;
- int status = 1;
struct usb_hcd *hcd;
unsigned int devflags = 0;
+ int ret_status = 0;
+
+ resource_size_t pci_mem_phy0;
+ resource_size_t memlength;
+
+ u8 __iomem *chip_addr;
+ u8 __iomem *iobase;
+ resource_size_t nxp_pci_io_base;
+ resource_size_t iolength;
if (usb_disabled())
return -ENODEV;
@@ -168,26 +165,30 @@ static int __devinit isp1761_pci_probe(struct pci_dev *dev,
iobase = ioremap_nocache(nxp_pci_io_base, iolength);
if (!iobase) {
printk(KERN_ERR "ioremap #1\n");
- release_mem_region(nxp_pci_io_base, iolength);
- return -ENOMEM;
+ ret_status = -ENOMEM;
+ goto cleanup1;
}
/* Grab the PLX PCI shared memory of the ISP 1761 we need */
pci_mem_phy0 = pci_resource_start(dev, 3);
- length = pci_resource_len(dev, 3);
-
- if (length < 0xffff) {
- printk(KERN_ERR "memory length for this resource is less than "
- "required\n");
- release_mem_region(nxp_pci_io_base, iolength);
- iounmap(iobase);
- return -ENOMEM;
+ memlength = pci_resource_len(dev, 3);
+ if (memlength < 0xffff) {
+ printk(KERN_ERR "memory length for this resource is wrong\n");
+ ret_status = -ENOMEM;
+ goto cleanup2;
}
- if (!request_mem_region(pci_mem_phy0, length, "ISP-PCI")) {
+ if (!request_mem_region(pci_mem_phy0, memlength, "ISP-PCI")) {
printk(KERN_ERR "host controller already in use\n");
- release_mem_region(nxp_pci_io_base, iolength);
- iounmap(iobase);
- return -EBUSY;
+ ret_status = -EBUSY;
+ goto cleanup2;
+ }
+
+ /* map available memory */
+ chip_addr = ioremap_nocache(pci_mem_phy0, memlength);
+ if (!chip_addr) {
+ printk(KERN_ERR "Error ioremap failed\n");
+ ret_status = -ENOMEM;
+ goto cleanup3;
}
/* bad pci latencies can contribute to overruns */
@@ -210,39 +211,54 @@ static int __devinit isp1761_pci_probe(struct pci_dev *dev,
* */
writel(0xface, chip_addr + HC_SCRATCH_REG);
udelay(100);
- reg_data = readl(chip_addr + HC_SCRATCH_REG);
+ reg_data = readl(chip_addr + HC_SCRATCH_REG) & 0x0000ffff;
retry_count--;
}
+ iounmap(chip_addr);
+
/* Host Controller presence is detected by writing to scratch register
* and reading back and checking the contents are same or not
*/
if (reg_data != 0xFACE) {
dev_err(&dev->dev, "scratch register mismatch %x\n", reg_data);
- goto clean;
+ ret_status = -ENOMEM;
+ goto cleanup3;
}
pci_set_master(dev);
- status = readl(iobase + 0x68);
- status |= 0x900;
- writel(status, iobase + 0x68);
+ /* configure PLX PCI chip to pass interrupts */
+#define PLX_INT_CSR_REG 0x68
+ reg_data = readl(iobase + PLX_INT_CSR_REG);
+ reg_data |= 0x900;
+ writel(reg_data, iobase + PLX_INT_CSR_REG);
dev->dev.dma_mask = NULL;
- hcd = isp1760_register(pci_mem_phy0, length, dev->irq,
+ hcd = isp1760_register(pci_mem_phy0, memlength, dev->irq,
IRQF_SHARED | IRQF_DISABLED, &dev->dev, dev_name(&dev->dev),
devflags);
- if (!IS_ERR(hcd)) {
- pci_set_drvdata(dev, hcd);
- return 0;
+ if (IS_ERR(hcd)) {
+ ret_status = -ENODEV;
+ goto cleanup3;
}
-clean:
- status = -ENODEV;
+
+ /* done with PLX IO access */
iounmap(iobase);
- release_mem_region(pci_mem_phy0, length);
release_mem_region(nxp_pci_io_base, iolength);
- return status;
+
+ pci_set_drvdata(dev, hcd);
+ return 0;
+
+cleanup3:
+ release_mem_region(pci_mem_phy0, memlength);
+cleanup2:
+ iounmap(iobase);
+cleanup1:
+ release_mem_region(nxp_pci_io_base, iolength);
+ return ret_status;
}
+
static void isp1761_pci_remove(struct pci_dev *dev)
{
struct usb_hcd *hcd;
@@ -255,12 +271,6 @@ static void isp1761_pci_remove(struct pci_dev *dev)
usb_put_hcd(hcd);
pci_disable_device(dev);
-
- iounmap(iobase);
- iounmap(chip_addr);
-
- release_mem_region(nxp_pci_io_base, iolength);
- release_mem_region(pci_mem_phy0, length);
}
static void isp1761_pci_shutdown(struct pci_dev *dev)
@@ -268,12 +278,16 @@ static void isp1761_pci_shutdown(struct pci_dev *dev)
printk(KERN_ERR "ips1761_pci_shutdown\n");
}
-static const struct pci_device_id isp1760_plx [] = { {
- /* handle any USB 2.0 EHCI controller */
- PCI_DEVICE_CLASS(((PCI_CLASS_BRIDGE_OTHER << 8) | (0x06 << 16)), ~0),
- .driver_data = 0,
-},
-{ /* end: all zeroes */ }
+static const struct pci_device_id isp1760_plx [] = {
+ {
+ .class = PCI_CLASS_BRIDGE_OTHER << 8,
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_PLX,
+ .device = 0x5406,
+ .subvendor = PCI_VENDOR_ID_PLX,
+ .subdevice = 0x9054,
+ },
+ { }
};
MODULE_DEVICE_TABLE(pci, isp1760_plx);
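
The probe rework above replaces the file-scope globals and per-failure cleanup calls with local variables and a goto-based cleanup ladder (cleanup1..cleanup3), so each exit releases exactly what was acquired before the failing step. A condensed, self-contained sketch of the idiom, with placeholder acquire/release helpers standing in for request_mem_region(), ioremap_nocache() and friends:

    /* Placeholders only; the control flow is what matters here. */
    static int acquire_a(void) { return 0; }
    static int acquire_b(void) { return 0; }
    static int acquire_c(void) { return -1; }
    static void release_a(void) { }
    static void release_b(void) { }

    static int example_probe(void)
    {
        int ret;

        ret = acquire_a();
        if (ret)
            return ret;        /* nothing to undo yet */

        ret = acquire_b();
        if (ret)
            goto err_a;

        ret = acquire_c();
        if (ret)
            goto err_b;        /* undoes b, then a, in reverse order */

        return 0;

    err_b:
        release_b();
    err_a:
        release_a();
        return ret;
    }
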
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 8aa3f4556a3..65a9609f4ad 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -589,13 +589,15 @@ static int ohci_run (struct ohci_hcd *ohci)
/* also: power/overcurrent flags in roothub.a */
}
- /* Reset USB nearly "by the book". RemoteWakeupConnected was
- * saved if boot firmware (BIOS/SMM/...) told us it's connected,
- * or if bus glue did the same (e.g. for PCI add-in cards with
- * PCI PM support).
+ /* Reset USB nearly "by the book". RemoteWakeupConnected has
+ * to be checked in case boot firmware (BIOS/SMM/...) has set up
+ * wakeup in a way the bus isn't aware of (e.g., legacy PCI PM).
+ * If the bus glue detected wakeup capability then it should
+ * already be enabled. Either way, if wakeup should be enabled
+ * but isn't, we'll enable it now.
*/
if ((ohci->hc_control & OHCI_CTRL_RWC) != 0
- && !device_may_wakeup(hcd->self.controller))
+ && !device_can_wakeup(hcd->self.controller))
device_init_wakeup(hcd->self.controller, 1);
switch (ohci->hc_control & OHCI_CTRL_HCFS) {
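
The ohci_run() hunk above changes the test from device_may_wakeup() to device_can_wakeup(): the former reports capability combined with the current wakeup policy, while the latter reports capability alone, which is what the RWC handling needs here. A small sketch of the distinction (note_wakeup_state() is an illustrative helper):

    #include <linux/device.h>
    #include <linux/pm_wakeup.h>

    /* Illustrative helper showing the capability/policy split. */
    static void note_wakeup_state(struct device *dev)
    {
        if (device_can_wakeup(dev))      /* hardware can wake the system */
            dev_info(dev, "wakeup capable\n");

        if (device_may_wakeup(dev))      /* capable AND currently enabled */
            dev_info(dev, "wakeup enabled by policy\n");
    }
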
diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c
index a9c2ae36c7a..8b28ae7865b 100644
--- a/drivers/usb/host/ohci-pci.c
+++ b/drivers/usb/host/ohci-pci.c
@@ -355,9 +355,9 @@ static int __devinit ohci_pci_start (struct usb_hcd *hcd)
/* RWC may not be set for add-in PCI cards, since boot
* firmware probably ignored them. This transfers PCI
- * PM wakeup capabilities (once the PCI layer is fixed).
+ * PM wakeup capabilities.
*/
- if (device_may_wakeup(&pdev->dev))
+ if (device_can_wakeup(&pdev->dev))
ohci->hc_control |= OHCI_CTRL_RWC;
}
#endif /* CONFIG_PM */
@@ -487,6 +487,8 @@ static struct pci_driver ohci_pci_driver = {
#ifdef CONFIG_PM
.suspend = usb_hcd_pci_suspend,
+ .suspend_late = usb_hcd_pci_suspend_late,
+ .resume_early = usb_hcd_pci_resume_early,
.resume = usb_hcd_pci_resume,
#endif
diff --git a/drivers/usb/host/ohci-pnx4008.c b/drivers/usb/host/ohci-pnx4008.c
index e306ca6aef3..100bf3d8437 100644
--- a/drivers/usb/host/ohci-pnx4008.c
+++ b/drivers/usb/host/ohci-pnx4008.c
@@ -106,65 +106,34 @@ extern int ocpi_enable(void);
static struct clk *usb_clk;
-static int isp1301_probe(struct i2c_adapter *adap);
-static int isp1301_detach(struct i2c_client *client);
-
static const unsigned short normal_i2c[] =
{ ISP1301_I2C_ADDR, ISP1301_I2C_ADDR + 1, I2C_CLIENT_END };
-static const unsigned short dummy_i2c_addrlist[] = { I2C_CLIENT_END };
-static struct i2c_client_address_data addr_data = {
- .normal_i2c = normal_i2c,
- .probe = dummy_i2c_addrlist,
- .ignore = dummy_i2c_addrlist,
+static int isp1301_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ return 0;
+}
+
+static int isp1301_remove(struct i2c_client *client)
+{
+ return 0;
+}
+
+const struct i2c_device_id isp1301_id[] = {
+ { "isp1301_pnx", 0 },
+ { }
};
struct i2c_driver isp1301_driver = {
.driver = {
.name = "isp1301_pnx",
},
- .attach_adapter = isp1301_probe,
- .detach_client = isp1301_detach,
+ .probe = isp1301_probe,
+ .remove = isp1301_remove,
+ .id_table = isp1301_id,
};
-static int isp1301_attach(struct i2c_adapter *adap, int addr, int kind)
-{
- struct i2c_client *c;
- int err;
-
- c = kzalloc(sizeof(*c), GFP_KERNEL);
- if (!c)
- return -ENOMEM;
-
- strlcpy(c->name, "isp1301_pnx", I2C_NAME_SIZE);
- c->flags = 0;
- c->addr = addr;
- c->adapter = adap;
- c->driver = &isp1301_driver;
-
- err = i2c_attach_client(c);
- if (err) {
- kfree(c);
- return err;
- }
-
- isp1301_i2c_client = c;
-
- return 0;
-}
-
-static int isp1301_probe(struct i2c_adapter *adap)
-{
- return i2c_probe(adap, &addr_data, isp1301_attach);
-}
-
-static int isp1301_detach(struct i2c_client *client)
-{
- i2c_detach_client(client);
- kfree(isp1301_i2c_client);
- return 0;
-}
-
static void i2c_write(u8 buf, u8 subaddr)
{
char tmpbuf[2];
@@ -328,6 +297,8 @@ static int __devinit usb_hcd_pnx4008_probe(struct platform_device *pdev)
struct usb_hcd *hcd = 0;
struct ohci_hcd *ohci;
const struct hc_driver *driver = &ohci_pnx4008_hc_driver;
+ struct i2c_adapter *i2c_adap;
+ struct i2c_board_info i2c_info;
int ret = 0, irq;
@@ -351,9 +322,20 @@ static int __devinit usb_hcd_pnx4008_probe(struct platform_device *pdev)
ret = i2c_add_driver(&isp1301_driver);
if (ret < 0) {
- err("failed to connect I2C to ISP1301 USB Transceiver");
+ err("failed to add ISP1301 driver");
goto out;
}
+ i2c_adap = i2c_get_adapter(2);
+ memset(&i2c_info, 0, sizeof(struct i2c_board_info));
+ strlcpy(i2c_info.type, "isp1301_pnx", I2C_NAME_SIZE);
+ isp1301_i2c_client = i2c_new_probed_device(i2c_adap, &i2c_info,
+ normal_i2c);
+ i2c_put_adapter(i2c_adap);
+ if (!isp1301_i2c_client) {
+ err("failed to connect I2C to ISP1301 USB Transceiver");
+ ret = -ENODEV;
+ goto out_i2c_driver;
+ }
isp1301_configure();
@@ -429,6 +411,9 @@ out3:
out2:
clk_put(usb_clk);
out1:
+ i2c_unregister_client(isp1301_i2c_client);
+ isp1301_i2c_client = NULL;
+out_i2c_driver:
i2c_del_driver(&isp1301_driver);
out:
return ret;
@@ -445,6 +430,8 @@ static int usb_hcd_pnx4008_remove(struct platform_device *pdev)
pnx4008_unset_usb_bits();
clk_disable(usb_clk);
clk_put(usb_clk);
+ i2c_unregister_client(isp1301_i2c_client);
+ isp1301_i2c_client = NULL;
i2c_del_driver(&isp1301_driver);
platform_set_drvdata(pdev, NULL);
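
The pnx4008 conversion above drops the legacy attach_adapter/detach_client callbacks in favour of the standard i2c probe/remove binding and instantiates the ISP1301 client explicitly by probing a short address list with i2c_new_probed_device() (the three-argument form used in this patch). A hedged sketch of that instantiation step; the bus number and addresses below are illustrative placeholders:

    #include <linux/i2c.h>
    #include <linux/string.h>

    /* Illustrative address list; I2C_CLIENT_END terminates it. */
    static const unsigned short xcvr_addrs[] = { 0x2c, 0x2d, I2C_CLIENT_END };

    /* Sketch: bind a client to whichever probe address answers on bus busnum. */
    static struct i2c_client *attach_transceiver(int busnum)
    {
        struct i2c_adapter *adap;
        struct i2c_board_info info;
        struct i2c_client *client;

        adap = i2c_get_adapter(busnum);
        if (!adap)
            return NULL;

        memset(&info, 0, sizeof(info));
        strlcpy(info.type, "isp1301_pnx", I2C_NAME_SIZE);

        client = i2c_new_probed_device(adap, &info, xcvr_addrs);
        i2c_put_adapter(adap);

        return client;    /* NULL if no address acknowledged */
    }

The returned client must later be released with i2c_unregister_client(), as the remove and error paths above do.
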
diff --git a/drivers/usb/host/ohci-ppc-of.c b/drivers/usb/host/ohci-ppc-of.c
index 7ac53264ead..68a30171029 100644
--- a/drivers/usb/host/ohci-ppc-of.c
+++ b/drivers/usb/host/ohci-ppc-of.c
@@ -91,6 +91,7 @@ ohci_hcd_ppc_of_probe(struct of_device *op, const struct of_device_id *match)
int rv;
int is_bigendian;
+ struct device_node *np;
if (usb_disabled())
return -ENODEV;
@@ -147,6 +148,30 @@ ohci_hcd_ppc_of_probe(struct of_device *op, const struct of_device_id *match)
if (rv == 0)
return 0;
+ /* the 440EPx is known to be affected by the usb_23 erratum */
+ np = of_find_compatible_node(NULL, NULL, "ibm,usb-ehci-440epx");
+
+ /* Work around: at this point ohci_run has executed, so the
+ * controller is running and everything (the root ports, etc.) is
+ * set up. If the ehci driver is loaded, put the ohci core in
+ * the suspended state. The ehci driver will bring it out of
+ * suspended state when / if a non-high speed USB device is
+ * attached to the USB Host port. If the ehci driver is not
+ * loaded, do nothing. request_mem_region is used to test if
+ * the ehci driver is loaded.
+ */
+ if (np != NULL) {
+ if (!of_address_to_resource(np, 0, &res)) {
+ if (!request_mem_region(res.start, 0x4, hcd_name)) {
+ writel_be((readl_be(&ohci->regs->control) |
+ OHCI_USB_SUSPEND), &ohci->regs->control);
+ (void) readl_be(&ohci->regs->control);
+ } else
+ release_mem_region(res.start, 0x4);
+ } else
+ pr_debug(__FILE__ ": cannot get ehci offset from fdt\n");
+ }
+
iounmap(hcd->regs);
err_ioremap:
irq_dispose_mapping(irq);
diff --git a/drivers/usb/host/ohci-tmio.c b/drivers/usb/host/ohci-tmio.c
index f9f134af0bd..8dabe8e31d8 100644
--- a/drivers/usb/host/ohci-tmio.c
+++ b/drivers/usb/host/ohci-tmio.c
@@ -201,7 +201,7 @@ static int __devinit ohci_hcd_tmio_drv_probe(struct platform_device *dev)
if (!cell)
return -EINVAL;
- hcd = usb_create_hcd(&ohci_tmio_hc_driver, &dev->dev, dev->dev.bus_id);
+ hcd = usb_create_hcd(&ohci_tmio_hc_driver, &dev->dev, dev_name(&dev->dev));
if (!hcd) {
ret = -ENOMEM;
goto err_usb_create_hcd;
diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c
new file mode 100644
index 00000000000..75548f7c716
--- /dev/null
+++ b/drivers/usb/host/oxu210hp-hcd.c
@@ -0,0 +1,3985 @@
+/*
+ * Copyright (c) 2008 Rodolfo Giometti
+ * Copyright (c) 2008 Eurotech S.p.A.
+ *
+ * This code is *strongly* based on EHCI-HCD code by David Brownell since
+ * the chip is quasi-EHCI compatible.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "../core/hcd.h"
+
+#include
+#include
+#include
+
+#include
+#include
+
+#include "oxu210hp.h"
+
+#define DRIVER_VERSION "0.0.50"
+
+/*
+ * Main defines
+ */
+
+#define oxu_dbg(oxu, fmt, args...) \
+ dev_dbg(oxu_to_hcd(oxu)->self.controller , fmt , ## args)
+#define oxu_err(oxu, fmt, args...) \
+ dev_err(oxu_to_hcd(oxu)->self.controller , fmt , ## args)
+#define oxu_info(oxu, fmt, args...) \
+ dev_info(oxu_to_hcd(oxu)->self.controller , fmt , ## args)
+
+static inline struct usb_hcd *oxu_to_hcd(struct oxu_hcd *oxu)
+{
+ return container_of((void *) oxu, struct usb_hcd, hcd_priv);
+}
+
+static inline struct oxu_hcd *hcd_to_oxu(struct usb_hcd *hcd)
+{
+ return (struct oxu_hcd *) (hcd->hcd_priv);
+}
+
+/*
+ * Debug stuff
+ */
+
+#undef OXU_URB_TRACE
+#undef OXU_VERBOSE_DEBUG
+
+#ifdef OXU_VERBOSE_DEBUG
+#define oxu_vdbg oxu_dbg
+#else
+#define oxu_vdbg(oxu, fmt, args...) /* Nop */
+#endif
+
+#ifdef DEBUG
+
+static int __attribute__((__unused__))
+dbg_status_buf(char *buf, unsigned len, const char *label, u32 status)
+{
+ return scnprintf(buf, len, "%s%sstatus %04x%s%s%s%s%s%s%s%s%s%s",
+ label, label[0] ? " " : "", status,
+ (status & STS_ASS) ? " Async" : "",
+ (status & STS_PSS) ? " Periodic" : "",
+ (status & STS_RECL) ? " Recl" : "",
+ (status & STS_HALT) ? " Halt" : "",
+ (status & STS_IAA) ? " IAA" : "",
+ (status & STS_FATAL) ? " FATAL" : "",
+ (status & STS_FLR) ? " FLR" : "",
+ (status & STS_PCD) ? " PCD" : "",
+ (status & STS_ERR) ? " ERR" : "",
+ (status & STS_INT) ? " INT" : ""
+ );
+}
+
+static int __attribute__((__unused__))
+dbg_intr_buf(char *buf, unsigned len, const char *label, u32 enable)
+{
+ return scnprintf(buf, len, "%s%sintrenable %02x%s%s%s%s%s%s",
+ label, label[0] ? " " : "", enable,
+ (enable & STS_IAA) ? " IAA" : "",
+ (enable & STS_FATAL) ? " FATAL" : "",
+ (enable & STS_FLR) ? " FLR" : "",
+ (enable & STS_PCD) ? " PCD" : "",
+ (enable & STS_ERR) ? " ERR" : "",
+ (enable & STS_INT) ? " INT" : ""
+ );
+}
+
+static const char *const fls_strings[] =
+ { "1024", "512", "256", "??" };
+
+static int dbg_command_buf(char *buf, unsigned len,
+ const char *label, u32 command)
+{
+ return scnprintf(buf, len,
+ "%s%scommand %06x %s=%d ithresh=%d%s%s%s%s period=%s%s %s",
+ label, label[0] ? " " : "", command,
+ (command & CMD_PARK) ? "park" : "(park)",
+ CMD_PARK_CNT(command),
+ (command >> 16) & 0x3f,
+ (command & CMD_LRESET) ? " LReset" : "",
+ (command & CMD_IAAD) ? " IAAD" : "",
+ (command & CMD_ASE) ? " Async" : "",
+ (command & CMD_PSE) ? " Periodic" : "",
+ fls_strings[(command >> 2) & 0x3],
+ (command & CMD_RESET) ? " Reset" : "",
+ (command & CMD_RUN) ? "RUN" : "HALT"
+ );
+}
+
+static int dbg_port_buf(char *buf, unsigned len, const char *label,
+ int port, u32 status)
+{
+ char *sig;
+
+ /* signaling state */
+ switch (status & (3 << 10)) {
+ case 0 << 10:
+ sig = "se0";
+ break;
+ case 1 << 10:
+ sig = "k"; /* low speed */
+ break;
+ case 2 << 10:
+ sig = "j";
+ break;
+ default:
+ sig = "?";
+ break;
+ }
+
+ return scnprintf(buf, len,
+ "%s%sport %d status %06x%s%s sig=%s%s%s%s%s%s%s%s%s%s",
+ label, label[0] ? " " : "", port, status,
+ (status & PORT_POWER) ? " POWER" : "",
+ (status & PORT_OWNER) ? " OWNER" : "",
+ sig,
+ (status & PORT_RESET) ? " RESET" : "",
+ (status & PORT_SUSPEND) ? " SUSPEND" : "",
+ (status & PORT_RESUME) ? " RESUME" : "",
+ (status & PORT_OCC) ? " OCC" : "",
+ (status & PORT_OC) ? " OC" : "",
+ (status & PORT_PEC) ? " PEC" : "",
+ (status & PORT_PE) ? " PE" : "",
+ (status & PORT_CSC) ? " CSC" : "",
+ (status & PORT_CONNECT) ? " CONNECT" : ""
+ );
+}
+
+#else
+
+static inline int __attribute__((__unused__))
+dbg_status_buf(char *buf, unsigned len, const char *label, u32 status)
+{ return 0; }
+
+static inline int __attribute__((__unused__))
+dbg_command_buf(char *buf, unsigned len, const char *label, u32 command)
+{ return 0; }
+
+static inline int __attribute__((__unused__))
+dbg_intr_buf(char *buf, unsigned len, const char *label, u32 enable)
+{ return 0; }
+
+static inline int __attribute__((__unused__))
+dbg_port_buf(char *buf, unsigned len, const char *label, int port, u32 status)
+{ return 0; }
+
+#endif /* DEBUG */
+
+/* functions have the "wrong" filename when they're output... */
+#define dbg_status(oxu, label, status) { \
+ char _buf[80]; \
+ dbg_status_buf(_buf, sizeof _buf, label, status); \
+ oxu_dbg(oxu, "%s\n", _buf); \
+}
+
+#define dbg_cmd(oxu, label, command) { \
+ char _buf[80]; \
+ dbg_command_buf(_buf, sizeof _buf, label, command); \
+ oxu_dbg(oxu, "%s\n", _buf); \
+}
+
+#define dbg_port(oxu, label, port, status) { \
+ char _buf[80]; \
+ dbg_port_buf(_buf, sizeof _buf, label, port, status); \
+ oxu_dbg(oxu, "%s\n", _buf); \
+}
+
+/*
+ * Module parameters
+ */
+
+/* Initial IRQ latency: faster than hw default */
+static int log2_irq_thresh; /* 0 to 6 */
+module_param(log2_irq_thresh, int, S_IRUGO);
+MODULE_PARM_DESC(log2_irq_thresh, "log2 IRQ latency, 1-64 microframes");
+
+/* Initial park setting: slower than hw default */
+static unsigned park;
+module_param(park, uint, S_IRUGO);
+MODULE_PARM_DESC(park, "park setting; 1-3 back-to-back async packets");
+
+/* For flakey hardware, ignore overcurrent indicators */
+static int ignore_oc;
+module_param(ignore_oc, bool, S_IRUGO);
+MODULE_PARM_DESC(ignore_oc, "ignore bogus hardware overcurrent indications");
+
+
+static void ehci_work(struct oxu_hcd *oxu);
+static int oxu_hub_control(struct usb_hcd *hcd,
+ u16 typeReq, u16 wValue, u16 wIndex,
+ char *buf, u16 wLength);
+
+/*
+ * Local functions
+ */
+
+/* Low level read/write registers functions */
+static inline u32 oxu_readl(void *base, u32 reg)
+{
+ return readl(base + reg);
+}
+
+static inline void oxu_writel(void *base, u32 reg, u32 val)
+{
+ writel(val, base + reg);
+}
+
+static inline void timer_action_done(struct oxu_hcd *oxu,
+ enum ehci_timer_action action)
+{
+ clear_bit(action, &oxu->actions);
+}
+
+static inline void timer_action(struct oxu_hcd *oxu,
+ enum ehci_timer_action action)
+{
+ if (!test_and_set_bit(action, &oxu->actions)) {
+ unsigned long t;
+
+ switch (action) {
+ case TIMER_IAA_WATCHDOG:
+ t = EHCI_IAA_JIFFIES;
+ break;
+ case TIMER_IO_WATCHDOG:
+ t = EHCI_IO_JIFFIES;
+ break;
+ case TIMER_ASYNC_OFF:
+ t = EHCI_ASYNC_JIFFIES;
+ break;
+ case TIMER_ASYNC_SHRINK:
+ default:
+ t = EHCI_SHRINK_JIFFIES;
+ break;
+ }
+ t += jiffies;
+ /* all timings except IAA watchdog can be overridden.
+ * async queue SHRINK often precedes IAA. while it's ready
+ * to go OFF neither can matter, and afterwards the IO
+ * watchdog stops unless there's still periodic traffic.
+ */
+ if (action != TIMER_IAA_WATCHDOG
+ && t > oxu->watchdog.expires
+ && timer_pending(&oxu->watchdog))
+ return;
+ mod_timer(&oxu->watchdog, t);
+ }
+}
+
+/*
+ * handshake - spin reading hc until handshake completes or fails
+ * @ptr: address of hc register to be read
+ * @mask: bits to look at in result of read
+ * @done: value of those bits when handshake succeeds
+ * @usec: timeout in microseconds
+ *
+ * Returns negative errno, or zero on success
+ *
+ * Success happens when the "mask" bits have the specified value (hardware
+ * handshake done). There are two failure modes: "usec" microseconds have
+ * passed (major hardware flakeout), or the register reads as all-ones
+ * (hardware removed).
+ * That last failure should only happen in cases like physical cardbus eject
+ * before driver shutdown. But it also seems to be caused by bugs in cardbus
+ * bridge shutdown: shutting down the bridge before the devices using it.
+ */
+static int handshake(struct oxu_hcd *oxu, void __iomem *ptr,
+ u32 mask, u32 done, int usec)
+{
+ u32 result;
+
+ do {
+ result = readl(ptr);
+ if (result == ~(u32)0) /* card removed */
+ return -ENODEV;
+ result &= mask;
+ if (result == done)
+ return 0;
+ udelay(1);
+ usec--;
+ } while (usec > 0);
+ return -ETIMEDOUT;
+}
+
+/* Force HC to halt state from unknown (EHCI spec section 2.3) */
+static int ehci_halt(struct oxu_hcd *oxu)
+{
+ u32 temp = readl(&oxu->regs->status);
+
+ /* disable any irqs left enabled by previous code */
+ writel(0, &oxu->regs->intr_enable);
+
+ if ((temp & STS_HALT) != 0)
+ return 0;
+
+ temp = readl(&oxu->regs->command);
+ temp &= ~CMD_RUN;
+ writel(temp, &oxu->regs->command);
+ return handshake(oxu, &oxu->regs->status,
+ STS_HALT, STS_HALT, 16 * 125);
+}
+
+/* Put TDI/ARC silicon into EHCI mode */
+static void tdi_reset(struct oxu_hcd *oxu)
+{
+ u32 __iomem *reg_ptr;
+ u32 tmp;
+
+ reg_ptr = (u32 __iomem *)(((u8 __iomem *)oxu->regs) + 0x68);
+ tmp = readl(reg_ptr);
+ tmp |= 0x3;
+ writel(tmp, reg_ptr);
+}
+
+/* Reset a non-running (STS_HALT == 1) controller */
+static int ehci_reset(struct oxu_hcd *oxu)
+{
+ int retval;
+ u32 command = readl(&oxu->regs->command);
+
+ command |= CMD_RESET;
+ dbg_cmd(oxu, "reset", command);
+ writel(command, &oxu->regs->command);
+ oxu_to_hcd(oxu)->state = HC_STATE_HALT;
+ oxu->next_statechange = jiffies;
+ retval = handshake(oxu, &oxu->regs->command,
+ CMD_RESET, 0, 250 * 1000);
+
+ if (retval)
+ return retval;
+
+ tdi_reset(oxu);
+
+ return retval;
+}
+
+/* Idle the controller (from running) */
+static void ehci_quiesce(struct oxu_hcd *oxu)
+{
+ u32 temp;
+
+#ifdef DEBUG
+ if (!HC_IS_RUNNING(oxu_to_hcd(oxu)->state))
+ BUG();
+#endif
+
+ /* wait for any schedule enables/disables to take effect */
+ temp = readl(&oxu->regs->command) << 10;
+ temp &= STS_ASS | STS_PSS;
+ if (handshake(oxu, &oxu->regs->status, STS_ASS | STS_PSS,
+ temp, 16 * 125) != 0) {
+ oxu_to_hcd(oxu)->state = HC_STATE_HALT;
+ return;
+ }
+
+ /* then disable anything that's still active */
+ temp = readl(&oxu->regs->command);
+ temp &= ~(CMD_ASE | CMD_IAAD | CMD_PSE);
+ writel(temp, &oxu->regs->command);
+
+ /* hardware can take 16 microframes to turn off ... */
+ if (handshake(oxu, &oxu->regs->status, STS_ASS | STS_PSS,
+ 0, 16 * 125) != 0) {
+ oxu_to_hcd(oxu)->state = HC_STATE_HALT;
+ return;
+ }
+}
+
+static int check_reset_complete(struct oxu_hcd *oxu, int index,
+ u32 __iomem *status_reg, int port_status)
+{
+ if (!(port_status & PORT_CONNECT)) {
+ oxu->reset_done[index] = 0;
+ return port_status;
+ }
+
+ /* if reset finished and it's still not enabled -- handoff */
+ if (!(port_status & PORT_PE)) {
+ oxu_dbg(oxu, "Failed to enable port %d on root hub TT\n",
+ index+1);
+ return port_status;
+ } else
+ oxu_dbg(oxu, "port %d high speed\n", index + 1);
+
+ return port_status;
+}
+
+static void ehci_hub_descriptor(struct oxu_hcd *oxu,
+ struct usb_hub_descriptor *desc)
+{
+ int ports = HCS_N_PORTS(oxu->hcs_params);
+ u16 temp;
+
+ desc->bDescriptorType = 0x29;
+ desc->bPwrOn2PwrGood = 10; /* oxu 1.0, 2.3.9 says 20ms max */
+ desc->bHubContrCurrent = 0;
+
+ desc->bNbrPorts = ports;
+ temp = 1 + (ports / 8);
+ desc->bDescLength = 7 + 2 * temp;
+
+ /* two bitmaps: ports removable, and usb 1.0 legacy PortPwrCtrlMask */
+ memset(&desc->bitmap[0], 0, temp);
+ memset(&desc->bitmap[temp], 0xff, temp);
+
+ temp = 0x0008; /* per-port overcurrent reporting */
+ if (HCS_PPC(oxu->hcs_params))
+ temp |= 0x0001; /* per-port power control */
+ else
+ temp |= 0x0002; /* no power switching */
+ desc->wHubCharacteristics = (__force __u16)cpu_to_le16(temp);
+}
+
+
+/* Allocate an OXU210HP on-chip memory data buffer
+ *
+ * An on-chip memory data buffer is required for each OXU210HP USB transfer.
+ * Each transfer descriptor has one or more on-chip memory data buffers.
+ *
+ * Data buffers are allocated from a fixed-size pool of data blocks.
+ * To minimise fragmentation and give reasonable memory utilisation,
+ * data buffers are allocated in power-of-2 multiples of the block size,
+ * starting at an address that is a multiple of the allocated size.
+ *
+ * FIXME: callers of this function require a buffer to be allocated for
+ * len=0. This is a waste of on-chip memory and should be fixed. Then this
+ * function should be changed to not allocate a buffer for len=0.
+ */
+static int oxu_buf_alloc(struct oxu_hcd *oxu, struct ehci_qtd *qtd, int len)
+{
+ int n_blocks; /* minimum blocks needed to hold len */
+ int a_blocks; /* blocks allocated */
+ int i, j;
+
+ /* Don't allocate bigger than supported */
+ if (len > BUFFER_SIZE * BUFFER_NUM) {
+ oxu_err(oxu, "buffer too big (%d)\n", len);
+ return -ENOMEM;
+ }
+
+ spin_lock(&oxu->mem_lock);
+
+ /* Number of blocks needed to hold len */
+ n_blocks = (len + BUFFER_SIZE - 1) / BUFFER_SIZE;
+
+ /* Round the number of blocks up to a power of 2 */
+ for (a_blocks = 1; a_blocks < n_blocks; a_blocks <<= 1)
+ ;
+
+ /* Find a suitable available data buffer */
+ for (i = 0; i < BUFFER_NUM;
+ i += max(a_blocks, (int)oxu->db_used[i])) {
+
+ /* Check all the required blocks are available */
+ for (j = 0; j < a_blocks; j++)
+ if (oxu->db_used[i + j])
+ break;
+
+ if (j != a_blocks)
+ continue;
+
+ /* Allocate blocks found! */
+ qtd->buffer = (void *) &oxu->mem->db_pool[i];
+ qtd->buffer_dma = virt_to_phys(qtd->buffer);
+
+ qtd->qtd_buffer_len = BUFFER_SIZE * a_blocks;
+ oxu->db_used[i] = a_blocks;
+
+ spin_unlock(&oxu->mem_lock);
+
+ return 0;
+ }
+
+ /* Failed */
+
+ spin_unlock(&oxu->mem_lock);
+
+ return -ENOMEM;
+}
+
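
oxu_buf_alloc() above sizes each request in whole blocks, rounds the block count up to a power of two, and then scans the pool for a free run, recording the run length in db_used[] at the run's first index so later scans can skip allocated runs in one step. A small stand-alone model of just that sizing and scanning arithmetic, with BLOCK_SIZE/BLOCK_NUM chosen purely for illustration (the driver's real constants are BUFFER_SIZE/BUFFER_NUM from oxu210hp.h):

    #include <stdio.h>

    #define BLOCK_SIZE 1024
    #define BLOCK_NUM  32

    static unsigned char used[BLOCK_NUM];    /* run length kept at run start */

    static int model_alloc(int len)
    {
        int n_blocks = (len + BLOCK_SIZE - 1) / BLOCK_SIZE;
        int a_blocks, i, j;

        /* round the block count up to a power of two */
        for (a_blocks = 1; a_blocks < n_blocks; a_blocks <<= 1)
            ;

        /* first-fit scan; allocated runs are skipped in a single step */
        for (i = 0; i + a_blocks <= BLOCK_NUM;
             i += (used[i] > a_blocks ? used[i] : a_blocks)) {
            for (j = 0; j < a_blocks; j++)
                if (used[i + j])
                    break;
            if (j == a_blocks) {
                used[i] = a_blocks;
                return i;                    /* index of the first block */
            }
        }
        return -1;                           /* pool exhausted */
    }

    int main(void)
    {
        printf("3000 bytes -> block %d (4 blocks)\n", model_alloc(3000));
        printf("100 bytes  -> block %d (1 block)\n", model_alloc(100));
        return 0;
    }
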
+static void oxu_buf_free(struct oxu_hcd *oxu, struct ehci_qtd *qtd)
+{
+ int index;
+
+ spin_lock(&oxu->mem_lock);
+
+ index = (qtd->buffer - (void *) &oxu->mem->db_pool[0])
+ / BUFFER_SIZE;
+ oxu->db_used[index] = 0;
+ qtd->qtd_buffer_len = 0;
+ qtd->buffer_dma = 0;
+ qtd->buffer = NULL;
+
+ spin_unlock(&oxu->mem_lock);
+
+ return;
+}
+
+static inline void ehci_qtd_init(struct ehci_qtd *qtd, dma_addr_t dma)
+{
+ memset(qtd, 0, sizeof *qtd);
+ qtd->qtd_dma = dma;
+ qtd->hw_token = cpu_to_le32(QTD_STS_HALT);
+ qtd->hw_next = EHCI_LIST_END;
+ qtd->hw_alt_next = EHCI_LIST_END;
+ INIT_LIST_HEAD(&qtd->qtd_list);
+}
+
+static inline void oxu_qtd_free(struct oxu_hcd *oxu, struct ehci_qtd *qtd)
+{
+ int index;
+
+ if (qtd->buffer)
+ oxu_buf_free(oxu, qtd);
+
+ spin_lock(&oxu->mem_lock);
+
+ index = qtd - &oxu->mem->qtd_pool[0];
+ oxu->qtd_used[index] = 0;
+
+ spin_unlock(&oxu->mem_lock);
+
+ return;
+}
+
+static struct ehci_qtd *ehci_qtd_alloc(struct oxu_hcd *oxu)
+{
+ int i;
+ struct ehci_qtd *qtd = NULL;
+
+ spin_lock(&oxu->mem_lock);
+
+ for (i = 0; i < QTD_NUM; i++)
+ if (!oxu->qtd_used[i])
+ break;
+
+ if (i < QTD_NUM) {
+ qtd = (struct ehci_qtd *) &oxu->mem->qtd_pool[i];
+ memset(qtd, 0, sizeof *qtd);
+
+ qtd->hw_token = cpu_to_le32(QTD_STS_HALT);
+ qtd->hw_next = EHCI_LIST_END;
+ qtd->hw_alt_next = EHCI_LIST_END;
+ INIT_LIST_HEAD(&qtd->qtd_list);
+
+ qtd->qtd_dma = virt_to_phys(qtd);
+
+ oxu->qtd_used[i] = 1;
+ }
+
+ spin_unlock(&oxu->mem_lock);
+
+ return qtd;
+}
+
+static void oxu_qh_free(struct oxu_hcd *oxu, struct ehci_qh *qh)
+{
+ int index;
+
+ spin_lock(&oxu->mem_lock);
+
+ index = qh - &oxu->mem->qh_pool[0];
+ oxu->qh_used[index] = 0;
+
+ spin_unlock(&oxu->mem_lock);
+
+ return;
+}
+
+static void qh_destroy(struct kref *kref)
+{
+ struct ehci_qh *qh = container_of(kref, struct ehci_qh, kref);
+ struct oxu_hcd *oxu = qh->oxu;
+
+ /* clean qtds first, and know this is not linked */
+ if (!list_empty(&qh->qtd_list) || qh->qh_next.ptr) {
+ oxu_dbg(oxu, "unused qh not empty!\n");
+ BUG();
+ }
+ if (qh->dummy)
+ oxu_qtd_free(oxu, qh->dummy);
+ oxu_qh_free(oxu, qh);
+}
+
+static struct ehci_qh *oxu_qh_alloc(struct oxu_hcd *oxu)
+{
+ int i;
+ struct ehci_qh *qh = NULL;
+
+ spin_lock(&oxu->mem_lock);
+
+ for (i = 0; i < QHEAD_NUM; i++)
+ if (!oxu->qh_used[i])
+ break;
+
+ if (i < QHEAD_NUM) {
+ qh = (struct ehci_qh *) &oxu->mem->qh_pool[i];
+ memset(qh, 0, sizeof *qh);
+
+ kref_init(&qh->kref);
+ qh->oxu = oxu;
+ qh->qh_dma = virt_to_phys(qh);
+ INIT_LIST_HEAD(&qh->qtd_list);
+
+ /* dummy td enables safe urb queuing */
+ qh->dummy = ehci_qtd_alloc(oxu);
+ if (qh->dummy == NULL) {
+ oxu_dbg(oxu, "no dummy td\n");
+ oxu->qh_used[i] = 0;
+
+ return NULL;
+ }
+
+ oxu->qh_used[i] = 1;
+ }
+
+ spin_unlock(&oxu->mem_lock);
+
+ return qh;
+}
+
+/* to share a qh (cpu threads, or hc) */
+static inline struct ehci_qh *qh_get(struct ehci_qh *qh)
+{
+ kref_get(&qh->kref);
+ return qh;
+}
+
+static inline void qh_put(struct ehci_qh *qh)
+{
+ kref_put(&qh->kref, qh_destroy);
+}
+
+static void oxu_murb_free(struct oxu_hcd *oxu, struct oxu_murb *murb)
+{
+ int index;
+
+ spin_lock(&oxu->mem_lock);
+
+ index = murb - &oxu->murb_pool[0];
+ oxu->murb_used[index] = 0;
+
+ spin_unlock(&oxu->mem_lock);
+
+ return;
+}
+
+static struct oxu_murb *oxu_murb_alloc(struct oxu_hcd *oxu)
+
+{
+ int i;
+ struct oxu_murb *murb = NULL;
+
+ spin_lock(&oxu->mem_lock);
+
+ for (i = 0; i < MURB_NUM; i++)
+ if (!oxu->murb_used[i])
+ break;
+
+ if (i < MURB_NUM) {
+ murb = &(oxu->murb_pool)[i];
+
+ oxu->murb_used[i] = 1;
+ }
+
+ spin_unlock(&oxu->mem_lock);
+
+ return murb;
+}
+
+/* The queue heads and transfer descriptors are managed from pools tied
+ * to each of the "per device" structures.
+ * This is the initialisation and cleanup code.
+ */
+static void ehci_mem_cleanup(struct oxu_hcd *oxu)
+{
+ kfree(oxu->murb_pool);
+ oxu->murb_pool = NULL;
+
+ if (oxu->async)
+ qh_put(oxu->async);
+ oxu->async = NULL;
+
+ del_timer(&oxu->urb_timer);
+
+ oxu->periodic = NULL;
+
+ /* shadow periodic table */
+ kfree(oxu->pshadow);
+ oxu->pshadow = NULL;
+}
+
+/* Remember to add cleanup code (above) if you add anything here.
+ */
+static int ehci_mem_init(struct oxu_hcd *oxu, gfp_t flags)
+{
+ int i;
+
+ for (i = 0; i < oxu->periodic_size; i++)
+ oxu->mem->frame_list[i] = EHCI_LIST_END;
+ for (i = 0; i < QHEAD_NUM; i++)
+ oxu->qh_used[i] = 0;
+ for (i = 0; i < QTD_NUM; i++)
+ oxu->qtd_used[i] = 0;
+
+ oxu->murb_pool = kcalloc(MURB_NUM, sizeof(struct oxu_murb), flags);
+ if (!oxu->murb_pool)
+ goto fail;
+
+ for (i = 0; i < MURB_NUM; i++)
+ oxu->murb_used[i] = 0;
+
+ oxu->async = oxu_qh_alloc(oxu);
+ if (!oxu->async)
+ goto fail;
+
+ oxu->periodic = (__le32 *) &oxu->mem->frame_list;
+ oxu->periodic_dma = virt_to_phys(oxu->periodic);
+
+ for (i = 0; i < oxu->periodic_size; i++)
+ oxu->periodic[i] = EHCI_LIST_END;
+
+ /* software shadow of hardware table */
+ oxu->pshadow = kcalloc(oxu->periodic_size, sizeof(void *), flags);
+ if (oxu->pshadow != NULL)
+ return 0;
+
+fail:
+ oxu_dbg(oxu, "couldn't init memory\n");
+ ehci_mem_cleanup(oxu);
+ return -ENOMEM;
+}
+
+/* Fill a qtd, returning how much of the buffer we were able to queue up.
+ */
+static int qtd_fill(struct ehci_qtd *qtd, dma_addr_t buf, size_t len,
+ int token, int maxpacket)
+{
+ int i, count;
+ u64 addr = buf;
+
+ /* one buffer entry per 4K ... first might be short or unaligned */
+ qtd->hw_buf[0] = cpu_to_le32((u32)addr);
+ qtd->hw_buf_hi[0] = cpu_to_le32((u32)(addr >> 32));
+ count = 0x1000 - (buf & 0x0fff); /* rest of that page */
+ if (likely(len < count)) /* ... iff needed */
+ count = len;
+ else {
+ buf += 0x1000;
+ buf &= ~0x0fff;
+
+ /* per-qtd limit: from 16K to 20K (best alignment) */
+ for (i = 1; count < len && i < 5; i++) {
+ addr = buf;
+ qtd->hw_buf[i] = cpu_to_le32((u32)addr);
+ qtd->hw_buf_hi[i] = cpu_to_le32((u32)(addr >> 32));
+ buf += 0x1000;
+ if ((count + 0x1000) < len)
+ count += 0x1000;
+ else
+ count = len;
+ }
+
+ /* short packets may only terminate transfers */
+ if (count != len)
+ count -= (count % maxpacket);
+ }
+ qtd->hw_token = cpu_to_le32((count << 16) | token);
+ qtd->length = count;
+
+ return count;
+}
+
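
qtd_fill() above packs a transfer into at most five 4 KiB buffer pointers: the first entry covers whatever remains of the starting page, and up to four more entries each cover a whole page, which is where the "from 16K to 20K (best alignment)" figure in its comment comes from. A tiny stand-alone model of that capacity rule, assuming 4 KiB pages:

    #include <stdio.h>

    /* Model of the per-qTD capacity: the rest of the first 4 KiB page,
     * plus up to four more whole pages.
     */
    static unsigned qtd_capacity(unsigned long buf)
    {
        unsigned first = 0x1000 - (buf & 0x0fff);    /* rest of the first page */

        return first + 4 * 0x1000;                   /* plus four full pages */
    }

    int main(void)
    {
        printf("page-aligned buffer: %u bytes\n", qtd_capacity(0x10000)); /* 20480 */
        printf("worst alignment:     %u bytes\n", qtd_capacity(0x10fff)); /* 16385 */
        return 0;
    }
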
+static inline void qh_update(struct oxu_hcd *oxu,
+ struct ehci_qh *qh, struct ehci_qtd *qtd)
+{
+ /* writes to an active overlay are unsafe */
+ BUG_ON(qh->qh_state != QH_STATE_IDLE);
+
+ qh->hw_qtd_next = QTD_NEXT(qtd->qtd_dma);
+ qh->hw_alt_next = EHCI_LIST_END;
+
+ /* Except for control endpoints, we make hardware maintain data
+ * toggle (like OHCI) ... here (re)initialize the toggle in the QH,
+ * and set the pseudo-toggle in udev. Only usb_clear_halt() will
+ * ever clear it.
+ */
+ if (!(qh->hw_info1 & cpu_to_le32(1 << 14))) {
+ unsigned is_out, epnum;
+
+ is_out = !(qtd->hw_token & cpu_to_le32(1 << 8));
+ epnum = (le32_to_cpup(&qh->hw_info1) >> 8) & 0x0f;
+ if (unlikely(!usb_gettoggle(qh->dev, epnum, is_out))) {
+ qh->hw_token &= ~__constant_cpu_to_le32(QTD_TOGGLE);
+ usb_settoggle(qh->dev, epnum, is_out, 1);
+ }
+ }
+
+ /* HC must see latest qtd and qh data before we clear ACTIVE+HALT */
+ wmb();
+ qh->hw_token &= __constant_cpu_to_le32(QTD_TOGGLE | QTD_STS_PING);
+}
+
+/* If it weren't for a common silicon quirk (writing the dummy into the qh
+ * overlay, so qh->hw_token wrongly becomes inactive/halted), only fault
+ * recovery (including urb dequeue) would need software changes to a QH...
+ */
+static void qh_refresh(struct oxu_hcd *oxu, struct ehci_qh *qh)
+{
+ struct ehci_qtd *qtd;
+
+ if (list_empty(&qh->qtd_list))
+ qtd = qh->dummy;
+ else {
+ qtd = list_entry(qh->qtd_list.next,
+ struct ehci_qtd, qtd_list);
+ /* first qtd may already be partially processed */
+ if (cpu_to_le32(qtd->qtd_dma) == qh->hw_current)
+ qtd = NULL;
+ }
+
+ if (qtd)
+ qh_update(oxu, qh, qtd);
+}
+
+static void qtd_copy_status(struct oxu_hcd *oxu, struct urb *urb,
+ size_t length, u32 token)
+{
+ /* count IN/OUT bytes, not SETUP (even short packets) */
+ if (likely(QTD_PID(token) != 2))
+ urb->actual_length += length - QTD_LENGTH(token);
+
+ /* don't modify error codes */
+ if (unlikely(urb->status != -EINPROGRESS))
+ return;
+
+ /* force cleanup after short read; not always an error */
+ if (unlikely(IS_SHORT_READ(token)))
+ urb->status = -EREMOTEIO;
+
+ /* serious "can't proceed" faults reported by the hardware */
+ if (token & QTD_STS_HALT) {
+ if (token & QTD_STS_BABBLE) {
+ /* FIXME "must" disable babbling device's port too */
+ urb->status = -EOVERFLOW;
+ } else if (token & QTD_STS_MMF) {
+ /* fs/ls interrupt xfer missed the complete-split */
+ urb->status = -EPROTO;
+ } else if (token & QTD_STS_DBE) {
+ urb->status = (QTD_PID(token) == 1) /* IN ? */
+ ? -ENOSR /* hc couldn't read data */
+ : -ECOMM; /* hc couldn't write data */
+ } else if (token & QTD_STS_XACT) {
+ /* timeout, bad crc, wrong PID, etc; retried */
+ if (QTD_CERR(token))
+ urb->status = -EPIPE;
+ else {
+ oxu_dbg(oxu, "devpath %s ep%d%s 3strikes\n",
+ urb->dev->devpath,
+ usb_pipeendpoint(urb->pipe),
+ usb_pipein(urb->pipe) ? "in" : "out");
+ urb->status = -EPROTO;
+ }
+ /* CERR nonzero + no errors + halt --> stall */
+ } else if (QTD_CERR(token))
+ urb->status = -EPIPE;
+ else /* unknown */
+ urb->status = -EPROTO;
+
+ oxu_vdbg(oxu, "dev%d ep%d%s qtd token %08x --> status %d\n",
+ usb_pipedevice(urb->pipe),
+ usb_pipeendpoint(urb->pipe),
+ usb_pipein(urb->pipe) ? "in" : "out",
+ token, urb->status);
+ }
+}
+
+static void ehci_urb_done(struct oxu_hcd *oxu, struct urb *urb)
+__releases(oxu->lock)
+__acquires(oxu->lock)
+{
+ if (likely(urb->hcpriv != NULL)) {
+ struct ehci_qh *qh = (struct ehci_qh *) urb->hcpriv;
+
+ /* S-mask in a QH means it's an interrupt urb */
+ if ((qh->hw_info2 & __constant_cpu_to_le32(QH_SMASK)) != 0) {
+
+ /* ... update hc-wide periodic stats (for usbfs) */
+ oxu_to_hcd(oxu)->self.bandwidth_int_reqs--;
+ }
+ qh_put(qh);
+ }
+
+ urb->hcpriv = NULL;
+ switch (urb->status) {
+ case -EINPROGRESS: /* success */
+ urb->status = 0;
+ default: /* fault */
+ break;
+ case -EREMOTEIO: /* fault or normal */
+ if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
+ urb->status = 0;
+ break;
+ case -ECONNRESET: /* canceled */
+ case -ENOENT:
+ break;
+ }
+
+#ifdef OXU_URB_TRACE
+ oxu_dbg(oxu, "%s %s urb %p ep%d%s status %d len %d/%d\n",
+ __func__, urb->dev->devpath, urb,
+ usb_pipeendpoint(urb->pipe),
+ usb_pipein(urb->pipe) ? "in" : "out",
+ urb->status,
+ urb->actual_length, urb->transfer_buffer_length);
+#endif
+
+ /* complete() can reenter this HCD */
+ spin_unlock(&oxu->lock);
+ usb_hcd_giveback_urb(oxu_to_hcd(oxu), urb, urb->status);
+ spin_lock(&oxu->lock);
+}
+
+static void start_unlink_async(struct oxu_hcd *oxu, struct ehci_qh *qh);
+static void unlink_async(struct oxu_hcd *oxu, struct ehci_qh *qh);
+
+static void intr_deschedule(struct oxu_hcd *oxu, struct ehci_qh *qh);
+static int qh_schedule(struct oxu_hcd *oxu, struct ehci_qh *qh);
+
+#define HALT_BIT __constant_cpu_to_le32(QTD_STS_HALT)
+
+/* Process and free completed qtds for a qh, returning URBs to drivers.
+ * Chases up to qh->hw_current. Returns number of completions called,
+ * indicating how much "real" work we did.
+ */
+static unsigned qh_completions(struct oxu_hcd *oxu, struct ehci_qh *qh)
+{
+ struct ehci_qtd *last = NULL, *end = qh->dummy;
+ struct list_head *entry, *tmp;
+ int stopped;
+ unsigned count = 0;
+ int do_status = 0;
+ u8 state;
+ struct oxu_murb *murb = NULL;
+
+ if (unlikely(list_empty(&qh->qtd_list)))
+ return count;
+
+ /* completions (or tasks on other cpus) must never clobber HALT
+ * till we've gone through and cleaned everything up, even when
+ * they add urbs to this qh's queue or mark them for unlinking.
+ *
+ * NOTE: unlinking expects to be done in queue order.
+ */
+ state = qh->qh_state;
+ qh->qh_state = QH_STATE_COMPLETING;
+ stopped = (state == QH_STATE_IDLE);
+
+ /* remove de-activated QTDs from front of queue.
+ * after faults (including short reads), cleanup this urb
+ * then let the queue advance.
+ * if queue is stopped, handles unlinks.
+ */
+ list_for_each_safe(entry, tmp, &qh->qtd_list) {
+ struct ehci_qtd *qtd;
+ struct urb *urb;
+ u32 token = 0;
+
+ qtd = list_entry(entry, struct ehci_qtd, qtd_list);
+ urb = qtd->urb;
+
+ /* Clean up any state from previous QTD ...*/
+ if (last) {
+ if (likely(last->urb != urb)) {
+ if (last->urb->complete == NULL) {
+ murb = (struct oxu_murb *) last->urb;
+ last->urb = murb->main;
+ if (murb->last) {
+ ehci_urb_done(oxu, last->urb);
+ count++;
+ }
+ oxu_murb_free(oxu, murb);
+ } else {
+ ehci_urb_done(oxu, last->urb);
+ count++;
+ }
+ }
+ oxu_qtd_free(oxu, last);
+ last = NULL;
+ }
+
+ /* ignore urbs submitted during completions we reported */
+ if (qtd == end)
+ break;
+
+ /* hardware copies qtd out of qh overlay */
+ rmb();
+ token = le32_to_cpu(qtd->hw_token);
+
+ /* always clean up qtds the hc de-activated */
+ if ((token & QTD_STS_ACTIVE) == 0) {
+
+ if ((token & QTD_STS_HALT) != 0) {
+ stopped = 1;
+
+ /* magic dummy for some short reads; qh won't advance.
+ * that silicon quirk can kick in with this dummy too.
+ */
+ } else if (IS_SHORT_READ(token) &&
+ !(qtd->hw_alt_next & EHCI_LIST_END)) {
+ stopped = 1;
+ goto halt;
+ }
+
+ /* stop scanning when we reach qtds the hc is using */
+ } else if (likely(!stopped &&
+ HC_IS_RUNNING(oxu_to_hcd(oxu)->state))) {
+ break;
+
+ } else {
+ stopped = 1;
+
+ if (unlikely(!HC_IS_RUNNING(oxu_to_hcd(oxu)->state)))
+ urb->status = -ESHUTDOWN;
+
+ /* ignore active urbs unless some previous qtd
+ * for the urb faulted (including short read) or
+ * its urb was canceled. we may patch qh or qtds.
+ */
+ if (likely(urb->status == -EINPROGRESS))
+ continue;
+
+ /* issue status after short control reads */
+ if (unlikely(do_status != 0)
+ && QTD_PID(token) == 0 /* OUT */) {
+ do_status = 0;
+ continue;
+ }
+
+ /* token in overlay may be most current */
+ if (state == QH_STATE_IDLE
+ && cpu_to_le32(qtd->qtd_dma)
+ == qh->hw_current)
+ token = le32_to_cpu(qh->hw_token);
+
+ /* force halt for unlinked or blocked qh, so we'll
+ * patch the qh later and so that completions can't
+ * activate it while we "know" it's stopped.
+ */
+ if ((HALT_BIT & qh->hw_token) == 0) {
+halt:
+ qh->hw_token |= HALT_BIT;
+ wmb();
+ }
+ }
+
+ /* Remove it from the queue */
+ qtd_copy_status(oxu, urb->complete ?
+ urb : ((struct oxu_murb *) urb)->main,
+ qtd->length, token);
+ if ((usb_pipein(qtd->urb->pipe)) &&
+ (NULL != qtd->transfer_buffer))
+ memcpy(qtd->transfer_buffer, qtd->buffer, qtd->length);
+ do_status = (urb->status == -EREMOTEIO)
+ && usb_pipecontrol(urb->pipe);
+
+ if (stopped && qtd->qtd_list.prev != &qh->qtd_list) {
+ last = list_entry(qtd->qtd_list.prev,
+ struct ehci_qtd, qtd_list);
+ last->hw_next = qtd->hw_next;
+ }
+ list_del(&qtd->qtd_list);
+ last = qtd;
+ }
+
+ /* last urb's completion might still need calling */
+ if (likely(last != NULL)) {
+ if (last->urb->complete == NULL) {
+ murb = (struct oxu_murb *) last->urb;
+ last->urb = murb->main;
+ if (murb->last) {
+ ehci_urb_done(oxu, last->urb);
+ count++;
+ }
+ oxu_murb_free(oxu, murb);
+ } else {
+ ehci_urb_done(oxu, last->urb);
+ count++;
+ }
+ oxu_qtd_free(oxu, last);
+ }
+
+ /* restore original state; caller must unlink or relink */
+ qh->qh_state = state;
+
+ /* be sure the hardware's done with the qh before refreshing
+ * it after fault cleanup, or recovering from silicon wrongly
+ * overlaying the dummy qtd (which reduces DMA chatter).
+ */
+ if (stopped != 0 || qh->hw_qtd_next == EHCI_LIST_END) {
+ switch (state) {
+ case QH_STATE_IDLE:
+ qh_refresh(oxu, qh);
+ break;
+ case QH_STATE_LINKED:
+ /* should be rare for periodic transfers,
+ * except maybe high bandwidth ...
+ */
+ if ((__constant_cpu_to_le32(QH_SMASK)
+ & qh->hw_info2) != 0) {
+ intr_deschedule(oxu, qh);
+ (void) qh_schedule(oxu, qh);
+ } else
+ unlink_async(oxu, qh);
+ break;
+ /* otherwise, unlink already started */
+ }
+ }
+
+ return count;
+}
+
+/* High bandwidth multiplier, as encoded in highspeed endpoint descriptors */
+#define hb_mult(wMaxPacketSize) (1 + (((wMaxPacketSize) >> 11) & 0x03))
+/* ... and packet size, for any kind of endpoint descriptor */
+#define max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x07ff)
+
+/* Reverse of qh_urb_transaction: free a list of TDs.
+ * used for cleanup after errors, before HC sees an URB's TDs.
+ */
+static void qtd_list_free(struct oxu_hcd *oxu,
+ struct urb *urb, struct list_head *qtd_list)
+{
+ struct list_head *entry, *temp;
+
+ list_for_each_safe(entry, temp, qtd_list) {
+ struct ehci_qtd *qtd;
+
+ qtd = list_entry(entry, struct ehci_qtd, qtd_list);
+ list_del(&qtd->qtd_list);
+ oxu_qtd_free(oxu, qtd);
+ }
+}
+
+/* Create a list of filled qtds for this URB; won't link into qh.
+ */
+static struct list_head *qh_urb_transaction(struct oxu_hcd *oxu,
+ struct urb *urb,
+ struct list_head *head,
+ gfp_t flags)
+{
+ struct ehci_qtd *qtd, *qtd_prev;
+ dma_addr_t buf;
+ int len, maxpacket;
+ int is_input;
+ u32 token;
+ void *transfer_buf = NULL;
+ int ret;
+
+ /*
+ * URBs map to sequences of QTDs: one logical transaction
+ */
+ qtd = ehci_qtd_alloc(oxu);
+ if (unlikely(!qtd))
+ return NULL;
+ list_add_tail(&qtd->qtd_list, head);
+ qtd->urb = urb;
+
+ token = QTD_STS_ACTIVE;
+ token |= (EHCI_TUNE_CERR << 10);
+ /* for split transactions, SplitXState initialized to zero */
+
+ len = urb->transfer_buffer_length;
+ is_input = usb_pipein(urb->pipe);
+ if (!urb->transfer_buffer && urb->transfer_buffer_length && is_input)
+ urb->transfer_buffer = phys_to_virt(urb->transfer_dma);
+
+ if (usb_pipecontrol(urb->pipe)) {
+ /* SETUP pid */
+ ret = oxu_buf_alloc(oxu, qtd, sizeof(struct usb_ctrlrequest));
+ if (ret)
+ goto cleanup;
+
+ qtd_fill(qtd, qtd->buffer_dma, sizeof(struct usb_ctrlrequest),
+ token | (2 /* "setup" */ << 8), 8);
+ memcpy(qtd->buffer, qtd->urb->setup_packet,
+ sizeof(struct usb_ctrlrequest));
+
+ /* ... and always at least one more pid */
+ token ^= QTD_TOGGLE;
+ qtd_prev = qtd;
+ qtd = ehci_qtd_alloc(oxu);
+ if (unlikely(!qtd))
+ goto cleanup;
+ qtd->urb = urb;
+ qtd_prev->hw_next = QTD_NEXT(qtd->qtd_dma);
+ list_add_tail(&qtd->qtd_list, head);
+
+ /* for zero length DATA stages, STATUS is always IN */
+ if (len == 0)
+ token |= (1 /* "in" */ << 8);
+ }
+
+ /*
+ * Data transfer stage: buffer setup
+ */
+
+ ret = oxu_buf_alloc(oxu, qtd, len);
+ if (ret)
+ goto cleanup;
+
+ buf = qtd->buffer_dma;
+ transfer_buf = urb->transfer_buffer;
+
+ if (!is_input)
+ memcpy(qtd->buffer, qtd->urb->transfer_buffer, len);
+
+ if (is_input)
+ token |= (1 /* "in" */ << 8);
+ /* else it's already initted to "out" pid (0 << 8) */
+
+ maxpacket = max_packet(usb_maxpacket(urb->dev, urb->pipe, !is_input));
+
+ /*
+ * buffer gets wrapped in one or more qtds;
+ * last one may be "short" (including zero len)
+ * and may serve as a control status ack
+ */
+ for (;;) {
+ int this_qtd_len;
+
+ this_qtd_len = qtd_fill(qtd, buf, len, token, maxpacket);
+ qtd->transfer_buffer = transfer_buf;
+ len -= this_qtd_len;
+ buf += this_qtd_len;
+ transfer_buf += this_qtd_len;
+ if (is_input)
+ qtd->hw_alt_next = oxu->async->hw_alt_next;
+
+ /* qh makes control packets use qtd toggle; maybe switch it */
+ if ((maxpacket & (this_qtd_len + (maxpacket - 1))) == 0)
+ token ^= QTD_TOGGLE;
+
+ if (likely(len <= 0))
+ break;
+
+ qtd_prev = qtd;
+ qtd = ehci_qtd_alloc(oxu);
+ if (unlikely(!qtd))
+ goto cleanup;
+ if (likely(len > 0)) {
+ ret = oxu_buf_alloc(oxu, qtd, len);
+ if (ret)
+ goto cleanup;
+ }
+ qtd->urb = urb;
+ qtd_prev->hw_next = QTD_NEXT(qtd->qtd_dma);
+ list_add_tail(&qtd->qtd_list, head);
+ }
+
+ /* unless the bulk/interrupt caller wants a chance to clean
+ * up after short reads, hc should advance qh past this urb
+ */
+ if (likely((urb->transfer_flags & URB_SHORT_NOT_OK) == 0
+ || usb_pipecontrol(urb->pipe)))
+ qtd->hw_alt_next = EHCI_LIST_END;
+
+ /*
+ * control requests may need a terminating data "status" ack;
+ * bulk ones may need a terminating short packet (zero length).
+ */
+ if (likely(urb->transfer_buffer_length != 0)) {
+ int one_more = 0;
+
+ if (usb_pipecontrol(urb->pipe)) {
+ one_more = 1;
+ token ^= 0x0100; /* "in" <--> "out" */
+ token |= QTD_TOGGLE; /* force DATA1 */
+ } else if (usb_pipebulk(urb->pipe)
+ && (urb->transfer_flags & URB_ZERO_PACKET)
+ && !(urb->transfer_buffer_length % maxpacket)) {
+ one_more = 1;
+ }
+ if (one_more) {
+ qtd_prev = qtd;
+ qtd = ehci_qtd_alloc(oxu);
+ if (unlikely(!qtd))
+ goto cleanup;
+ qtd->urb = urb;
+ qtd_prev->hw_next = QTD_NEXT(qtd->qtd_dma);
+ list_add_tail(&qtd->qtd_list, head);
+
+ /* never any data in such packets */
+ qtd_fill(qtd, 0, 0, token, 0);
+ }
+ }
+
+ /* by default, enable interrupt on urb completion */
+ qtd->hw_token |= __constant_cpu_to_le32(QTD_IOC);
+ return head;
+
+cleanup:
+ qtd_list_free(oxu, urb, head);
+ return NULL;
+}
+
+/* Each QH holds a qtd list; a QH is used for everything except iso.
+ *
+ * For interrupt urbs, the scheduler must set the microframe scheduling
+ * mask(s) each time the QH gets scheduled. For highspeed, that's
+ * just one microframe in the s-mask. For split interrupt transactions
+ * there are additional complications: c-mask, maybe FSTNs.
+ */
+static struct ehci_qh *qh_make(struct oxu_hcd *oxu,
+ struct urb *urb, gfp_t flags)
+{
+ struct ehci_qh *qh = oxu_qh_alloc(oxu);
+ u32 info1 = 0, info2 = 0;
+ int is_input, type;
+ int maxp = 0;
+
+ if (!qh)
+ return qh;
+
+ /*
+ * init endpoint/device data for this QH
+ */
+ info1 |= usb_pipeendpoint(urb->pipe) << 8;
+ info1 |= usb_pipedevice(urb->pipe) << 0;
+
+ is_input = usb_pipein(urb->pipe);
+ type = usb_pipetype(urb->pipe);
+ maxp = usb_maxpacket(urb->dev, urb->pipe, !is_input);
+
+ /* Compute interrupt scheduling parameters just once, and save.
+ * - allowing for high bandwidth, how many nsec/uframe are used?
+ * - split transactions need a second CSPLIT uframe; same question
+ * - splits also need a schedule gap (for full/low speed I/O)
+ * - qh has a polling interval
+ *
+ * For control/bulk requests, the HC or TT handles these.
+ */
+ if (type == PIPE_INTERRUPT) {
+ qh->usecs = NS_TO_US(usb_calc_bus_time(USB_SPEED_HIGH,
+ is_input, 0,
+ hb_mult(maxp) * max_packet(maxp)));
+ qh->start = NO_FRAME;
+
+ if (urb->dev->speed == USB_SPEED_HIGH) {
+ qh->c_usecs = 0;
+ qh->gap_uf = 0;
+
+ qh->period = urb->interval >> 3;
+ if (qh->period == 0 && urb->interval != 1) {
+ /* NOTE interval 2 or 4 uframes could work.
+ * But interval 1 scheduling is simpler, and
+ * includes high bandwidth.
+ */
+ dbg("intr period %d uframes, NYET!",
+ urb->interval);
+ goto done;
+ }
+ } else {
+ struct usb_tt *tt = urb->dev->tt;
+ int think_time;
+
+ /* gap is f(FS/LS transfer times) */
+ qh->gap_uf = 1 + usb_calc_bus_time(urb->dev->speed,
+ is_input, 0, maxp) / (125 * 1000);
+
+ /* FIXME this just approximates SPLIT/CSPLIT times */
+ if (is_input) { /* SPLIT, gap, CSPLIT+DATA */
+ qh->c_usecs = qh->usecs + HS_USECS(0);
+ qh->usecs = HS_USECS(1);
+ } else { /* SPLIT+DATA, gap, CSPLIT */
+ qh->usecs += HS_USECS(1);
+ qh->c_usecs = HS_USECS(0);
+ }
+
+ think_time = tt ? tt->think_time : 0;
+ qh->tt_usecs = NS_TO_US(think_time +
+ usb_calc_bus_time(urb->dev->speed,
+ is_input, 0, max_packet(maxp)));
+ qh->period = urb->interval;
+ }
+ }
+
+ /* support for tt scheduling, and access to toggles */
+ qh->dev = urb->dev;
+
+ /* using TT? */
+ switch (urb->dev->speed) {
+ case USB_SPEED_LOW:
+ info1 |= (1 << 12); /* EPS "low" */
+ /* FALL THROUGH */
+
+ case USB_SPEED_FULL:
+ /* EPS 0 means "full" */
+ if (type != PIPE_INTERRUPT)
+ info1 |= (EHCI_TUNE_RL_TT << 28);
+ if (type == PIPE_CONTROL) {
+ info1 |= (1 << 27); /* for TT */
+ info1 |= 1 << 14; /* toggle from qtd */
+ }
+ info1 |= maxp << 16;
+
+ info2 |= (EHCI_TUNE_MULT_TT << 30);
+ info2 |= urb->dev->ttport << 23;
+
+ /* NOTE: if (PIPE_INTERRUPT) { scheduler sets c-mask } */
+
+ break;
+
+ case USB_SPEED_HIGH: /* no TT involved */
+ info1 |= (2 << 12); /* EPS "high" */
+ if (type == PIPE_CONTROL) {
+ info1 |= (EHCI_TUNE_RL_HS << 28);
+ info1 |= 64 << 16; /* usb2 fixed maxpacket */
+ info1 |= 1 << 14; /* toggle from qtd */
+ info2 |= (EHCI_TUNE_MULT_HS << 30);
+ } else if (type == PIPE_BULK) {
+ info1 |= (EHCI_TUNE_RL_HS << 28);
+ info1 |= 512 << 16; /* usb2 fixed maxpacket */
+ info2 |= (EHCI_TUNE_MULT_HS << 30);
+ } else { /* PIPE_INTERRUPT */
+ info1 |= max_packet(maxp) << 16;
+ info2 |= hb_mult(maxp) << 30;
+ }
+ break;
+ default:
+ dbg("bogus dev %p speed %d", urb->dev, urb->dev->speed);
+done:
+ qh_put(qh);
+ return NULL;
+ }
+
+ /* NOTE: if (PIPE_INTERRUPT) { scheduler sets s-mask } */
+
+ /* init as live, toggle clear, advance to dummy */
+ qh->qh_state = QH_STATE_IDLE;
+ qh->hw_info1 = cpu_to_le32(info1);
+ qh->hw_info2 = cpu_to_le32(info2);
+ usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe), !is_input, 1);
+ qh_refresh(oxu, qh);
+ return qh;
+}
+
+/* Move qh (and its qtds) onto async queue; maybe enable queue.
+ */
+static void qh_link_async(struct oxu_hcd *oxu, struct ehci_qh *qh)
+{
+ __le32 dma = QH_NEXT(qh->qh_dma);
+ struct ehci_qh *head;
+
+ /* (re)start the async schedule? */
+ head = oxu->async;
+ timer_action_done(oxu, TIMER_ASYNC_OFF);
+ if (!head->qh_next.qh) {
+ u32 cmd = readl(&oxu->regs->command);
+
+ if (!(cmd & CMD_ASE)) {
+ /* in case a clear of CMD_ASE didn't take yet */
+ (void)handshake(oxu, &oxu->regs->status,
+ STS_ASS, 0, 150);
+ cmd |= CMD_ASE | CMD_RUN;
+ writel(cmd, &oxu->regs->command);
+ oxu_to_hcd(oxu)->state = HC_STATE_RUNNING;
+ /* posted write need not be known to HC yet ... */
+ }
+ }
+
+ /* clear halt and/or toggle; and maybe recover from silicon quirk */
+ if (qh->qh_state == QH_STATE_IDLE)
+ qh_refresh(oxu, qh);
+
+ /* splice right after start */
+ qh->qh_next = head->qh_next;
+ qh->hw_next = head->hw_next;
+ wmb();
+
+ head->qh_next.qh = qh;
+ head->hw_next = dma;
+
+ qh->qh_state = QH_STATE_LINKED;
+ /* qtd completions reported later by interrupt */
+}
+
+#define QH_ADDR_MASK __constant_cpu_to_le32(0x7f)
+
+/*
+ * For control/bulk/interrupt, return QH with these TDs appended.
+ * Allocates and initializes the QH if necessary.
+ * Returns null if it can't allocate a QH it needs to.
+ * If the QH has TDs (urbs) already, that's great.
+ */
+static struct ehci_qh *qh_append_tds(struct oxu_hcd *oxu,
+ struct urb *urb, struct list_head *qtd_list,
+ int epnum, void **ptr)
+{
+ struct ehci_qh *qh = NULL;
+
+ qh = (struct ehci_qh *) *ptr;
+ if (unlikely(qh == NULL)) {
+ /* can't sleep here, we have oxu->lock... */
+ qh = qh_make(oxu, urb, GFP_ATOMIC);
+ *ptr = qh;
+ }
+ if (likely(qh != NULL)) {
+ struct ehci_qtd *qtd;
+
+ if (unlikely(list_empty(qtd_list)))
+ qtd = NULL;
+ else
+ qtd = list_entry(qtd_list->next, struct ehci_qtd,
+ qtd_list);
+
+ /* control qh may need patching ... */
+ if (unlikely(epnum == 0)) {
+
+ /* usb_reset_device() briefly reverts to address 0 */
+ if (usb_pipedevice(urb->pipe) == 0)
+ qh->hw_info1 &= ~QH_ADDR_MASK;
+ }
+
+ /* just one way to queue requests: swap with the dummy qtd.
+ * only hc or qh_refresh() ever modify the overlay.
+ */
+ if (likely(qtd != NULL)) {
+ struct ehci_qtd *dummy;
+ dma_addr_t dma;
+ __le32 token;
+
+ /* to avoid racing the HC, use the dummy td instead of
+ * the first td of our list (becomes new dummy). both
+ * tds stay deactivated until we're done, when the
+ * HC is allowed to fetch the old dummy (4.10.2).
+ */
+ token = qtd->hw_token;
+ qtd->hw_token = HALT_BIT;
+ wmb();
+ dummy = qh->dummy;
+
+ dma = dummy->qtd_dma;
+ *dummy = *qtd;
+ dummy->qtd_dma = dma;
+
+ list_del(&qtd->qtd_list);
+ list_add(&dummy->qtd_list, qtd_list);
+ list_splice(qtd_list, qh->qtd_list.prev);
+
+ ehci_qtd_init(qtd, qtd->qtd_dma);
+ qh->dummy = qtd;
+
+ /* hc must see the new dummy at list end */
+ dma = qtd->qtd_dma;
+ qtd = list_entry(qh->qtd_list.prev,
+ struct ehci_qtd, qtd_list);
+ qtd->hw_next = QTD_NEXT(dma);
+
+ /* let the hc process these next qtds */
+ dummy->hw_token = (token & ~(0x80));
+ wmb();
+ dummy->hw_token = token;
+
+ urb->hcpriv = qh_get(qh);
+ }
+ }
+ return qh;
+}
+
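+/* Submit control/bulk qtds for an urb: append them to the endpoint's qh
+ * and, if that qh is idle, link it into the async schedule.
+ */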
+static int submit_async(struct oxu_hcd *oxu, struct urb *urb,
+ struct list_head *qtd_list, gfp_t mem_flags)
+{
+ struct ehci_qtd *qtd;
+ int epnum;
+ unsigned long flags;
+ struct ehci_qh *qh = NULL;
+ int rc = 0;
+
+ qtd = list_entry(qtd_list->next, struct ehci_qtd, qtd_list);
+ epnum = urb->ep->desc.bEndpointAddress;
+
+#ifdef OXU_URB_TRACE
+ oxu_dbg(oxu, "%s %s urb %p ep%d%s len %d, qtd %p [qh %p]\n",
+ __func__, urb->dev->devpath, urb,
+ epnum & 0x0f, (epnum & USB_DIR_IN) ? "in" : "out",
+ urb->transfer_buffer_length,
+ qtd, urb->ep->hcpriv);
+#endif
+
+ spin_lock_irqsave(&oxu->lock, flags);
+ if (unlikely(!test_bit(HCD_FLAG_HW_ACCESSIBLE,
+ &oxu_to_hcd(oxu)->flags))) {
+ rc = -ESHUTDOWN;
+ goto done;
+ }
+
+ qh = qh_append_tds(oxu, urb, qtd_list, epnum, &urb->ep->hcpriv);
+ if (unlikely(qh == NULL)) {
+ rc = -ENOMEM;
+ goto done;
+ }
+
+ /* Control/bulk operations through TTs don't need scheduling,
+ * the HC and TT handle it when the TT has a buffer ready.
+ */
+ if (likely(qh->qh_state == QH_STATE_IDLE))
+ qh_link_async(oxu, qh_get(qh));
+done:
+ spin_unlock_irqrestore(&oxu->lock, flags);
+ if (unlikely(qh == NULL))
+ qtd_list_free(oxu, urb, qtd_list);
+ return rc;
+}
+
+/* The async qh for the qtds being reclaimed is now unlinked from the HC */
+
+static void end_unlink_async(struct oxu_hcd *oxu)
+{
+ struct ehci_qh *qh = oxu->reclaim;
+ struct ehci_qh *next;
+
+ timer_action_done(oxu, TIMER_IAA_WATCHDOG);
+
+ qh->qh_state = QH_STATE_IDLE;
+ qh->qh_next.qh = NULL;
+ qh_put(qh); /* refcount from reclaim */
+
+ /* other unlink(s) may be pending (in QH_STATE_UNLINK_WAIT) */
+ next = qh->reclaim;
+ oxu->reclaim = next;
+ oxu->reclaim_ready = 0;
+ qh->reclaim = NULL;
+
+ qh_completions(oxu, qh);
+
+ if (!list_empty(&qh->qtd_list)
+ && HC_IS_RUNNING(oxu_to_hcd(oxu)->state))
+ qh_link_async(oxu, qh);
+ else {
+ qh_put(qh); /* refcount from async list */
+
+ /* it's not free to turn the async schedule on/off; leave it
+ * active but idle for a while once it empties.
+ */
+ if (HC_IS_RUNNING(oxu_to_hcd(oxu)->state)
+ && oxu->async->qh_next.qh == NULL)
+ timer_action(oxu, TIMER_ASYNC_OFF);
+ }
+
+ if (next) {
+ oxu->reclaim = NULL;
+ start_unlink_async(oxu, next);
+ }
+}
+
+/* makes sure the async qh will become idle */
+/* caller must own oxu->lock */
+
+static void start_unlink_async(struct oxu_hcd *oxu, struct ehci_qh *qh)
+{
+ int cmd = readl(&oxu->regs->command);
+ struct ehci_qh *prev;
+
+#ifdef DEBUG
+ assert_spin_locked(&oxu->lock);
+ if (oxu->reclaim || (qh->qh_state != QH_STATE_LINKED
+ && qh->qh_state != QH_STATE_UNLINK_WAIT))
+ BUG();
+#endif
+
+ /* stop async schedule right now? */
+ if (unlikely(qh == oxu->async)) {
+ /* can't get here without STS_ASS set */
+ if (oxu_to_hcd(oxu)->state != HC_STATE_HALT
+ && !oxu->reclaim) {
+ /* ... and CMD_IAAD clear */
+ writel(cmd & ~CMD_ASE, &oxu->regs->command);
+ wmb();
+ /* handshake later, if we need to */
+ timer_action_done(oxu, TIMER_ASYNC_OFF);
+ }
+ return;
+ }
+
+ qh->qh_state = QH_STATE_UNLINK;
+ oxu->reclaim = qh = qh_get(qh);
+
+ prev = oxu->async;
+ while (prev->qh_next.qh != qh)
+ prev = prev->qh_next.qh;
+
+ prev->hw_next = qh->hw_next;
+ prev->qh_next = qh->qh_next;
+ wmb();
+
+ if (unlikely(oxu_to_hcd(oxu)->state == HC_STATE_HALT)) {
+ /* if (unlikely(qh->reclaim != 0))
+ * this will recurse, probably not much
+ */
+ end_unlink_async(oxu);
+ return;
+ }
+
+ oxu->reclaim_ready = 0;
+ cmd |= CMD_IAAD;
+ writel(cmd, &oxu->regs->command);
+ (void) readl(&oxu->regs->command);
+ timer_action(oxu, TIMER_IAA_WATCHDOG);
+}
+
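+/* Scan the async schedule: handle completions on each qh, and unlink qhs
+ * that have stayed idle (or arm the shrink timer for freshly idle ones).
+ */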
+static void scan_async(struct oxu_hcd *oxu)
+{
+ struct ehci_qh *qh;
+ enum ehci_timer_action action = TIMER_IO_WATCHDOG;
+
+ if (!++(oxu->stamp))
+ oxu->stamp++;
+ timer_action_done(oxu, TIMER_ASYNC_SHRINK);
+rescan:
+ qh = oxu->async->qh_next.qh;
+ if (likely(qh != NULL)) {
+ do {
+ /* clean any finished work for this qh */
+ if (!list_empty(&qh->qtd_list)
+ && qh->stamp != oxu->stamp) {
+ int temp;
+
+ /* unlinks could happen here; completion
+ * reporting drops the lock. rescan using
+ * the latest schedule, but don't rescan
+ * qhs we already finished (no looping).
+ */
+ qh = qh_get(qh);
+ qh->stamp = oxu->stamp;
+ temp = qh_completions(oxu, qh);
+ qh_put(qh);
+ if (temp != 0)
+ goto rescan;
+ }
+
+ /* unlink idle entries, reducing HC PCI usage as well
+ * as HCD schedule-scanning costs. delay for any qh
+ * we just scanned, there's a not-unusual case that it
+ * doesn't stay idle for long.
+ * (plus, avoids some kind of re-activation race.)
+ */
+ if (list_empty(&qh->qtd_list)) {
+ if (qh->stamp == oxu->stamp)
+ action = TIMER_ASYNC_SHRINK;
+ else if (!oxu->reclaim
+ && qh->qh_state == QH_STATE_LINKED)
+ start_unlink_async(oxu, qh);
+ }
+
+ qh = qh->qh_next.qh;
+ } while (qh);
+ }
+ if (action == TIMER_ASYNC_SHRINK)
+ timer_action(oxu, TIMER_ASYNC_SHRINK);
+}
+
+/*
+ * periodic_next_shadow - return "next" pointer on shadow list
+ * @periodic: host pointer to qh/itd/sitd
+ * @tag: hardware tag for type of this record
+ */
+static union ehci_shadow *periodic_next_shadow(union ehci_shadow *periodic,
+ __le32 tag)
+{
+ switch (tag) {
+ default:
+ case Q_TYPE_QH:
+ return &periodic->qh->qh_next;
+ }
+}
+
+/* caller must hold oxu->lock */
+static void periodic_unlink(struct oxu_hcd *oxu, unsigned frame, void *ptr)
+{
+ union ehci_shadow *prev_p = &oxu->pshadow[frame];
+ __le32 *hw_p = &oxu->periodic[frame];
+ union ehci_shadow here = *prev_p;
+
+ /* find predecessor of "ptr"; hw and shadow lists are in sync */
+ while (here.ptr && here.ptr != ptr) {
+ prev_p = periodic_next_shadow(prev_p, Q_NEXT_TYPE(*hw_p));
+ hw_p = here.hw_next;
+ here = *prev_p;
+ }
+ /* an interrupt entry (at list end) could have been shared */
+ if (!here.ptr)
+ return;
+
+ /* update shadow and hardware lists ... the old "next" pointers
+ * from ptr may still be in use, the caller updates them.
+ */
+ *prev_p = *periodic_next_shadow(&here, Q_NEXT_TYPE(*hw_p));
+ *hw_p = *here.hw_next;
+}
+
+/* how many of the uframe's 125 usecs are allocated? */
+static unsigned short periodic_usecs(struct oxu_hcd *oxu,
+ unsigned frame, unsigned uframe)
+{
+ __le32 *hw_p = &oxu->periodic[frame];
+ union ehci_shadow *q = &oxu->pshadow[frame];
+ unsigned usecs = 0;
+
+ while (q->ptr) {
+ switch (Q_NEXT_TYPE(*hw_p)) {
+ case Q_TYPE_QH:
+ default:
+ /* is it in the S-mask? */
+ if (q->qh->hw_info2 & cpu_to_le32(1 << uframe))
+ usecs += q->qh->usecs;
+ /* ... or C-mask? */
+ if (q->qh->hw_info2 & cpu_to_le32(1 << (8 + uframe)))
+ usecs += q->qh->c_usecs;
+ hw_p = &q->qh->hw_next;
+ q = &q->qh->qh_next;
+ break;
+ }
+ }
+#ifdef DEBUG
+ if (usecs > 100)
+ oxu_err(oxu, "uframe %d sched overrun: %d usecs\n",
+ frame * 8 + uframe, usecs);
+#endif
+ return usecs;
+}
+
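+/* Start the periodic schedule: wait for a prior disable to take effect,
+ * then set CMD_PSE.
+ */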
+static int enable_periodic(struct oxu_hcd *oxu)
+{
+ u32 cmd;
+ int status;
+
+	/* did clearing PSE take effect yet?
+ * takes effect only at frame boundaries...
+ */
+ status = handshake(oxu, &oxu->regs->status, STS_PSS, 0, 9 * 125);
+ if (status != 0) {
+ oxu_to_hcd(oxu)->state = HC_STATE_HALT;
+ return status;
+ }
+
+ cmd = readl(&oxu->regs->command) | CMD_PSE;
+ writel(cmd, &oxu->regs->command);
+ /* posted write ... PSS happens later */
+ oxu_to_hcd(oxu)->state = HC_STATE_RUNNING;
+
+ /* make sure ehci_work scans these */
+ oxu->next_uframe = readl(&oxu->regs->frame_index)
+ % (oxu->periodic_size << 3);
+ return 0;
+}
+
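+/* Stop the periodic schedule: wait for a prior enable to take effect,
+ * then clear CMD_PSE.
+ */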
+static int disable_periodic(struct oxu_hcd *oxu)
+{
+ u32 cmd;
+ int status;
+
+ /* did setting PSE not take effect yet?
+ * takes effect only at frame boundaries...
+ */
+ status = handshake(oxu, &oxu->regs->status, STS_PSS, STS_PSS, 9 * 125);
+ if (status != 0) {
+ oxu_to_hcd(oxu)->state = HC_STATE_HALT;
+ return status;
+ }
+
+ cmd = readl(&oxu->regs->command) & ~CMD_PSE;
+ writel(cmd, &oxu->regs->command);
+ /* posted write ... */
+
+ oxu->next_uframe = -1;
+ return 0;
+}
+
+/* periodic schedule slots have iso tds (normal or split) first, then a
+ * sparse tree for active interrupt transfers.
+ *
+ * this just links in a qh; caller guarantees uframe masks are set right.
+ * no FSTN support (yet; oxu 0.96+)
+ */
+static int qh_link_periodic(struct oxu_hcd *oxu, struct ehci_qh *qh)
+{
+ unsigned i;
+ unsigned period = qh->period;
+
+ dev_dbg(&qh->dev->dev,
+ "link qh%d-%04x/%p start %d [%d/%d us]\n",
+ period, le32_to_cpup(&qh->hw_info2) & (QH_CMASK | QH_SMASK),
+ qh, qh->start, qh->usecs, qh->c_usecs);
+
+ /* high bandwidth, or otherwise every microframe */
+ if (period == 0)
+ period = 1;
+
+ for (i = qh->start; i < oxu->periodic_size; i += period) {
+ union ehci_shadow *prev = &oxu->pshadow[i];
+ __le32 *hw_p = &oxu->periodic[i];
+ union ehci_shadow here = *prev;
+ __le32 type = 0;
+
+ /* skip the iso nodes at list head */
+ while (here.ptr) {
+ type = Q_NEXT_TYPE(*hw_p);
+ if (type == Q_TYPE_QH)
+ break;
+ prev = periodic_next_shadow(prev, type);
+ hw_p = &here.qh->hw_next;
+ here = *prev;
+ }
+
+ /* sorting each branch by period (slow-->fast)
+ * enables sharing interior tree nodes
+ */
+ while (here.ptr && qh != here.qh) {
+ if (qh->period > here.qh->period)
+ break;
+ prev = &here.qh->qh_next;
+ hw_p = &here.qh->hw_next;
+ here = *prev;
+ }
+ /* link in this qh, unless some earlier pass did that */
+ if (qh != here.qh) {
+ qh->qh_next = here;
+ if (here.qh)
+ qh->hw_next = *hw_p;
+ wmb();
+ prev->qh = qh;
+ *hw_p = QH_NEXT(qh->qh_dma);
+ }
+ }
+ qh->qh_state = QH_STATE_LINKED;
+ qh_get(qh);
+
+ /* update per-qh bandwidth for usbfs */
+ oxu_to_hcd(oxu)->self.bandwidth_allocated += qh->period
+ ? ((qh->usecs + qh->c_usecs) / qh->period)
+ : (qh->usecs * 8);
+
+ /* maybe enable periodic schedule processing */
+ if (!oxu->periodic_sched++)
+ return enable_periodic(oxu);
+
+ return 0;
+}
+
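+/* Unlink an interrupt qh from every periodic frame slot it occupies and
+ * update the bandwidth accounting; turn the schedule off once it empties.
+ */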
+static void qh_unlink_periodic(struct oxu_hcd *oxu, struct ehci_qh *qh)
+{
+ unsigned i;
+ unsigned period;
+
+ /* FIXME:
+ * IF this isn't high speed
+ * and this qh is active in the current uframe
+ * (and overlay token SplitXstate is false?)
+ * THEN
+ * qh->hw_info1 |= __constant_cpu_to_le32(1 << 7 "ignore");
+ */
+
+ /* high bandwidth, or otherwise part of every microframe */
+ period = qh->period;
+ if (period == 0)
+ period = 1;
+
+ for (i = qh->start; i < oxu->periodic_size; i += period)
+ periodic_unlink(oxu, i, qh);
+
+ /* update per-qh bandwidth for usbfs */
+ oxu_to_hcd(oxu)->self.bandwidth_allocated -= qh->period
+ ? ((qh->usecs + qh->c_usecs) / qh->period)
+ : (qh->usecs * 8);
+
+ dev_dbg(&qh->dev->dev,
+ "unlink qh%d-%04x/%p start %d [%d/%d us]\n",
+ qh->period,
+ le32_to_cpup(&qh->hw_info2) & (QH_CMASK | QH_SMASK),
+ qh, qh->start, qh->usecs, qh->c_usecs);
+
+ /* qh->qh_next still "live" to HC */
+ qh->qh_state = QH_STATE_UNLINK;
+ qh->qh_next.ptr = NULL;
+ qh_put(qh);
+
+ /* maybe turn off periodic schedule */
+ oxu->periodic_sched--;
+ if (!oxu->periodic_sched)
+ (void) disable_periodic(oxu);
+}
+
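+/* Deschedule an interrupt qh and wait long enough for the HC to finish
+ * any access to it before marking it idle.
+ */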
+static void intr_deschedule(struct oxu_hcd *oxu, struct ehci_qh *qh)
+{
+ unsigned wait;
+
+ qh_unlink_periodic(oxu, qh);
+
+ /* simple/paranoid: always delay, expecting the HC needs to read
+ * qh->hw_next or finish a writeback after SPLIT/CSPLIT ... and
+ * expect khubd to clean up after any CSPLITs we won't issue.
+ * active high speed queues may need bigger delays...
+ */
+ if (list_empty(&qh->qtd_list)
+ || (__constant_cpu_to_le32(QH_CMASK) & qh->hw_info2) != 0)
+ wait = 2;
+ else
+ wait = 55; /* worst case: 3 * 1024 */
+
+ udelay(wait);
+ qh->qh_state = QH_STATE_IDLE;
+ qh->hw_next = EHCI_LIST_END;
+ wmb();
+}
+
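+/* Return nonzero if 'usecs' more can be claimed in this uframe, every
+ * 'period' frames, without exceeding the 80% periodic bandwidth budget.
+ */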
+static int check_period(struct oxu_hcd *oxu,
+ unsigned frame, unsigned uframe,
+ unsigned period, unsigned usecs)
+{
+ int claimed;
+
+ /* complete split running into next frame?
+ * given FSTN support, we could sometimes check...
+ */
+ if (uframe >= 8)
+ return 0;
+
+ /*
+ * 80% periodic == 100 usec/uframe available
+ * convert "usecs we need" to "max already claimed"
+ */
+ usecs = 100 - usecs;
+
+ /* we "know" 2 and 4 uframe intervals were rejected; so
+ * for period 0, check _every_ microframe in the schedule.
+ */
+ if (unlikely(period == 0)) {
+ do {
+ for (uframe = 0; uframe < 7; uframe++) {
+ claimed = periodic_usecs(oxu, frame, uframe);
+ if (claimed > usecs)
+ return 0;
+ }
+ } while ((frame += 1) < oxu->periodic_size);
+
+ /* just check the specified uframe, at that period */
+ } else {
+ do {
+ claimed = periodic_usecs(oxu, frame, uframe);
+ if (claimed > usecs)
+ return 0;
+ } while ((frame += period) < oxu->periodic_size);
+ }
+
+ return 1;
+}
+
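+/* Check whether an interrupt qh fits at this frame/uframe; only transfers
+ * that need no CSPLIT (c_usecs == 0) can be scheduled here.
+ */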
+static int check_intr_schedule(struct oxu_hcd *oxu,
+ unsigned frame, unsigned uframe,
+ const struct ehci_qh *qh, __le32 *c_maskp)
+{
+ int retval = -ENOSPC;
+
+ if (qh->c_usecs && uframe >= 6) /* FSTN territory? */
+ goto done;
+
+ if (!check_period(oxu, frame, uframe, qh->period, qh->usecs))
+ goto done;
+ if (!qh->c_usecs) {
+ retval = 0;
+ *c_maskp = 0;
+ goto done;
+ }
+
+done:
+ return retval;
+}
+
+/* "first fit" scheduling policy used the first time through,
+ * or when the previous schedule slot can't be re-used.
+ */
+static int qh_schedule(struct oxu_hcd *oxu, struct ehci_qh *qh)
+{
+ int status;
+ unsigned uframe;
+ __le32 c_mask;
+ unsigned frame; /* 0..(qh->period - 1), or NO_FRAME */
+
+ qh_refresh(oxu, qh);
+ qh->hw_next = EHCI_LIST_END;
+ frame = qh->start;
+
+ /* reuse the previous schedule slots, if we can */
+ if (frame < qh->period) {
+ uframe = ffs(le32_to_cpup(&qh->hw_info2) & QH_SMASK);
+ status = check_intr_schedule(oxu, frame, --uframe,
+ qh, &c_mask);
+ } else {
+ uframe = 0;
+ c_mask = 0;
+ status = -ENOSPC;
+ }
+
+ /* else scan the schedule to find a group of slots such that all
+ * uframes have enough periodic bandwidth available.
+ */
+ if (status) {
+ /* "normal" case, uframing flexible except with splits */
+ if (qh->period) {
+ frame = qh->period - 1;
+ do {
+ for (uframe = 0; uframe < 8; uframe++) {
+ status = check_intr_schedule(oxu,
+ frame, uframe, qh,
+ &c_mask);
+ if (status == 0)
+ break;
+ }
+ } while (status && frame--);
+
+ /* qh->period == 0 means every uframe */
+ } else {
+ frame = 0;
+ status = check_intr_schedule(oxu, 0, 0, qh, &c_mask);
+ }
+ if (status)
+ goto done;
+ qh->start = frame;
+
+ /* reset S-frame and (maybe) C-frame masks */
+ qh->hw_info2 &= __constant_cpu_to_le32(~(QH_CMASK | QH_SMASK));
+ qh->hw_info2 |= qh->period
+ ? cpu_to_le32(1 << uframe)
+ : __constant_cpu_to_le32(QH_SMASK);
+ qh->hw_info2 |= c_mask;
+ } else
+ oxu_dbg(oxu, "reused qh %p schedule\n", qh);
+
+ /* stuff into the periodic schedule */
+ status = qh_link_periodic(oxu, qh);
+done:
+ return status;
+}
+
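+/* Submit an interrupt urb: schedule the endpoint's qh if it is idle, then
+ * queue the urb's qtds onto it.
+ */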
+static int intr_submit(struct oxu_hcd *oxu, struct urb *urb,
+ struct list_head *qtd_list, gfp_t mem_flags)
+{
+ unsigned epnum;
+ unsigned long flags;
+ struct ehci_qh *qh;
+ int status = 0;
+ struct list_head empty;
+
+ /* get endpoint and transfer/schedule data */
+ epnum = urb->ep->desc.bEndpointAddress;
+
+ spin_lock_irqsave(&oxu->lock, flags);
+
+ if (unlikely(!test_bit(HCD_FLAG_HW_ACCESSIBLE,
+ &oxu_to_hcd(oxu)->flags))) {
+ status = -ESHUTDOWN;
+ goto done;
+ }
+
+ /* get qh and force any scheduling errors */
+ INIT_LIST_HEAD(&empty);
+ qh = qh_append_tds(oxu, urb, &empty, epnum, &urb->ep->hcpriv);
+ if (qh == NULL) {
+ status = -ENOMEM;
+ goto done;
+ }
+ if (qh->qh_state == QH_STATE_IDLE) {
+ status = qh_schedule(oxu, qh);
+ if (status != 0)
+ goto done;
+ }
+
+ /* then queue the urb's tds to the qh */
+ qh = qh_append_tds(oxu, urb, qtd_list, epnum, &urb->ep->hcpriv);
+ BUG_ON(qh == NULL);
+
+ /* ... update usbfs periodic stats */
+ oxu_to_hcd(oxu)->self.bandwidth_int_reqs++;
+
+done:
+ spin_unlock_irqrestore(&oxu->lock, flags);
+ if (status)
+ qtd_list_free(oxu, urb, qtd_list);
+
+ return status;
+}
+
+static inline int itd_submit(struct oxu_hcd *oxu, struct urb *urb,
+ gfp_t mem_flags)
+{
+ oxu_dbg(oxu, "iso support is missing!\n");
+ return -ENOSYS;
+}
+
+static inline int sitd_submit(struct oxu_hcd *oxu, struct urb *urb,
+ gfp_t mem_flags)
+{
+ oxu_dbg(oxu, "split iso support is missing!\n");
+ return -ENOSYS;
+}
+
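+/* Scan the periodic schedule from the last scan point up to the current
+ * frame index, handling completions on interrupt qhs.
+ */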
+static void scan_periodic(struct oxu_hcd *oxu)
+{
+ unsigned frame, clock, now_uframe, mod;
+ unsigned modified;
+
+ mod = oxu->periodic_size << 3;
+
+ /*
+ * When running, scan from last scan point up to "now"
+ * else clean up by scanning everything that's left.
+ * Touches as few pages as possible: cache-friendly.
+ */
+ now_uframe = oxu->next_uframe;
+ if (HC_IS_RUNNING(oxu_to_hcd(oxu)->state))
+ clock = readl(&oxu->regs->frame_index);
+ else
+ clock = now_uframe + mod - 1;
+ clock %= mod;
+
+ for (;;) {
+ union ehci_shadow q, *q_p;
+ __le32 type, *hw_p;
+ unsigned uframes;
+
+ /* don't scan past the live uframe */
+ frame = now_uframe >> 3;
+ if (frame == (clock >> 3))
+ uframes = now_uframe & 0x07;
+ else {
+ /* safe to scan the whole frame at once */
+ now_uframe |= 0x07;
+ uframes = 8;
+ }
+
+restart:
+ /* scan each element in frame's queue for completions */
+ q_p = &oxu->pshadow[frame];
+ hw_p = &oxu->periodic[frame];
+ q.ptr = q_p->ptr;
+ type = Q_NEXT_TYPE(*hw_p);
+ modified = 0;
+
+ while (q.ptr != NULL) {
+ union ehci_shadow temp;
+ int live;
+
+ live = HC_IS_RUNNING(oxu_to_hcd(oxu)->state);
+ switch (type) {
+ case Q_TYPE_QH:
+ /* handle any completions */
+ temp.qh = qh_get(q.qh);
+ type = Q_NEXT_TYPE(q.qh->hw_next);
+ q = q.qh->qh_next;
+ modified = qh_completions(oxu, temp.qh);
+ if (unlikely(list_empty(&temp.qh->qtd_list)))
+ intr_deschedule(oxu, temp.qh);
+ qh_put(temp.qh);
+ break;
+ default:
+ dbg("corrupt type %d frame %d shadow %p",
+ type, frame, q.ptr);
+ q.ptr = NULL;
+ }
+
+ /* assume completion callbacks modify the queue */
+ if (unlikely(modified))
+ goto restart;
+ }
+
+ /* Stop when we catch up to the HC */
+
+ /* FIXME: this assumes we won't get lapped when
+ * latencies climb; that should be rare, but...
+ * detect it, and just go all the way around.
+ * FLR might help detect this case, so long as latencies
+ * don't exceed periodic_size msec (default 1.024 sec).
+ */
+
+ /* FIXME: likewise assumes HC doesn't halt mid-scan */
+
+ if (now_uframe == clock) {
+ unsigned now;
+
+ if (!HC_IS_RUNNING(oxu_to_hcd(oxu)->state))
+ break;
+ oxu->next_uframe = now_uframe;
+ now = readl(&oxu->regs->frame_index) % mod;
+ if (now_uframe == now)
+ break;
+
+ /* rescan the rest of this frame, then ... */
+ clock = now;
+ } else {
+ now_uframe++;
+ now_uframe %= mod;
+ }
+ }
+}
+
+/* On some systems, leaving remote wakeup enabled prevents system shutdown.
+ * The firmware seems to think that powering off is a wakeup event!
+ * This routine turns off remote wakeup and everything else, on all ports.
+ */
+static void ehci_turn_off_all_ports(struct oxu_hcd *oxu)
+{
+ int port = HCS_N_PORTS(oxu->hcs_params);
+
+ while (port--)
+ writel(PORT_RWC_BITS, &oxu->regs->port_status[port]);
+}
+
+static void ehci_port_power(struct oxu_hcd *oxu, int is_on)
+{
+ unsigned port;
+
+ if (!HCS_PPC(oxu->hcs_params))
+ return;
+
+ oxu_dbg(oxu, "...power%s ports...\n", is_on ? "up" : "down");
+ for (port = HCS_N_PORTS(oxu->hcs_params); port > 0; )
+ (void) oxu_hub_control(oxu_to_hcd(oxu),
+ is_on ? SetPortFeature : ClearPortFeature,
+ USB_PORT_FEAT_POWER,
+ port--, NULL, 0);
+ msleep(20);
+}
+
+/* Called from some interrupts, timers, and so on.
+ * It calls driver completion functions, after dropping oxu->lock.
+ */
+static void ehci_work(struct oxu_hcd *oxu)
+{
+ timer_action_done(oxu, TIMER_IO_WATCHDOG);
+ if (oxu->reclaim_ready)
+ end_unlink_async(oxu);
+
+ /* another CPU may drop oxu->lock during a schedule scan while
+ * it reports urb completions. this flag guards against bogus
+ * attempts at re-entrant schedule scanning.
+ */
+ if (oxu->scanning)
+ return;
+ oxu->scanning = 1;
+ scan_async(oxu);
+ if (oxu->next_uframe != -1)
+ scan_periodic(oxu);
+ oxu->scanning = 0;
+
+ /* the IO watchdog guards against hardware or driver bugs that
+ * misplace IRQs, and should let us run completely without IRQs.
+ * such lossage has been observed on both VT6202 and VT8235.
+ */
+ if (HC_IS_RUNNING(oxu_to_hcd(oxu)->state) &&
+ (oxu->async->qh_next.ptr != NULL ||
+ oxu->periodic_sched != 0))
+ timer_action(oxu, TIMER_IO_WATCHDOG);
+}
+
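+/* Start unlinking a qh from the async schedule; if an IAA cycle is already
+ * in progress, queue this qh behind the current reclaim chain.
+ */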
+static void unlink_async(struct oxu_hcd *oxu, struct ehci_qh *qh)
+{
+ /* if we need to use IAA and it's busy, defer */
+ if (qh->qh_state == QH_STATE_LINKED
+ && oxu->reclaim
+ && HC_IS_RUNNING(oxu_to_hcd(oxu)->state)) {
+ struct ehci_qh *last;
+
+ for (last = oxu->reclaim;
+ last->reclaim;
+ last = last->reclaim)
+ continue;
+ qh->qh_state = QH_STATE_UNLINK_WAIT;
+ last->reclaim = qh;
+
+ /* bypass IAA if the hc can't care */
+ } else if (!HC_IS_RUNNING(oxu_to_hcd(oxu)->state) && oxu->reclaim)
+ end_unlink_async(oxu);
+
+ /* something else might have unlinked the qh by now */
+ if (qh->qh_state == QH_STATE_LINKED)
+ start_unlink_async(oxu, qh);
+}
+
+/*
+ * USB host controller methods
+ */
+
+static irqreturn_t oxu210_hcd_irq(struct usb_hcd *hcd)
+{
+ struct oxu_hcd *oxu = hcd_to_oxu(hcd);
+ u32 status, pcd_status = 0;
+ int bh;
+
+ spin_lock(&oxu->lock);
+
+ status = readl(&oxu->regs->status);
+
+ /* e.g. cardbus physical eject */
+ if (status == ~(u32) 0) {
+ oxu_dbg(oxu, "device removed\n");
+ goto dead;
+ }
+
+ status &= INTR_MASK;
+ if (!status) { /* irq sharing? */
+ spin_unlock(&oxu->lock);
+ return IRQ_NONE;
+ }
+
+ /* clear (just) interrupts */
+ writel(status, &oxu->regs->status);
+ readl(&oxu->regs->command); /* unblock posted write */
+ bh = 0;
+
+#ifdef OXU_VERBOSE_DEBUG
+ /* unrequested/ignored: Frame List Rollover */
+ dbg_status(oxu, "irq", status);
+#endif
+
+ /* INT, ERR, and IAA interrupt rates can be throttled */
+
+ /* normal [4.15.1.2] or error [4.15.1.1] completion */
+ if (likely((status & (STS_INT|STS_ERR)) != 0))
+ bh = 1;
+
+ /* complete the unlinking of some qh [4.15.2.3] */
+ if (status & STS_IAA) {
+ oxu->reclaim_ready = 1;
+ bh = 1;
+ }
+
+ /* remote wakeup [4.3.1] */
+ if (status & STS_PCD) {
+ unsigned i = HCS_N_PORTS(oxu->hcs_params);
+ pcd_status = status;
+
+ /* resume root hub? */
+ if (!(readl(&oxu->regs->command) & CMD_RUN))
+ usb_hcd_resume_root_hub(hcd);
+
+ while (i--) {
+ int pstatus = readl(&oxu->regs->port_status[i]);
+
+ if (pstatus & PORT_OWNER)
+ continue;
+ if (!(pstatus & PORT_RESUME)
+ || oxu->reset_done[i] != 0)
+ continue;
+
+ /* start 20 msec resume signaling from this port,
+ * and make khubd collect PORT_STAT_C_SUSPEND to
+ * stop that signaling.
+ */
+ oxu->reset_done[i] = jiffies + msecs_to_jiffies(20);
+ oxu_dbg(oxu, "port %d remote wakeup\n", i + 1);
+ mod_timer(&hcd->rh_timer, oxu->reset_done[i]);
+ }
+ }
+
+ /* PCI errors [4.15.2.4] */
+ if (unlikely((status & STS_FATAL) != 0)) {
+ /* bogus "fatal" IRQs appear on some chips... why? */
+ status = readl(&oxu->regs->status);
+ dbg_cmd(oxu, "fatal", readl(&oxu->regs->command));
+ dbg_status(oxu, "fatal", status);
+ if (status & STS_HALT) {
+ oxu_err(oxu, "fatal error\n");
+dead:
+ ehci_reset(oxu);
+ writel(0, &oxu->regs->configured_flag);
+ /* generic layer kills/unlinks all urbs, then
+ * uses oxu_stop to clean up the rest
+ */
+ bh = 1;
+ }
+ }
+
+ if (bh)
+ ehci_work(oxu);
+ spin_unlock(&oxu->lock);
+ if (pcd_status & STS_PCD)
+ usb_hcd_poll_rh_status(hcd);
+ return IRQ_HANDLED;
+}
+
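+/* Chip-level IRQ handler: mask the top-level interrupts, dispatch to
+ * oxu210_hcd_irq() for the active (OTG or SPH) core, then unmask.
+ */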
+static irqreturn_t oxu_irq(struct usb_hcd *hcd)
+{
+ struct oxu_hcd *oxu = hcd_to_oxu(hcd);
+ int ret = IRQ_HANDLED;
+
+ u32 status = oxu_readl(hcd->regs, OXU_CHIPIRQSTATUS);
+ u32 enable = oxu_readl(hcd->regs, OXU_CHIPIRQEN_SET);
+
+	/* Disable all interrupts */
+ oxu_writel(hcd->regs, OXU_CHIPIRQEN_CLR, enable);
+
+ if ((oxu->is_otg && (status & OXU_USBOTGI)) ||
+ (!oxu->is_otg && (status & OXU_USBSPHI)))
+ oxu210_hcd_irq(hcd);
+ else
+ ret = IRQ_NONE;
+
+	/* Re-enable all interrupts */
+ oxu_writel(hcd->regs, OXU_CHIPIRQEN_SET, enable);
+
+ return ret;
+}
+
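+/* Watchdog timer: recover lost IAA interrupts, turn off an idled async
+ * schedule, and let the driver make progress even without IRQs.
+ */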
+static void oxu_watchdog(unsigned long param)
+{
+ struct oxu_hcd *oxu = (struct oxu_hcd *) param;
+ unsigned long flags;
+
+ spin_lock_irqsave(&oxu->lock, flags);
+
+ /* lost IAA irqs wedge things badly; seen with a vt8235 */
+ if (oxu->reclaim) {
+ u32 status = readl(&oxu->regs->status);
+ if (status & STS_IAA) {
+ oxu_vdbg(oxu, "lost IAA\n");
+ writel(STS_IAA, &oxu->regs->status);
+ oxu->reclaim_ready = 1;
+ }
+ }
+
+ /* stop async processing after it's idled a bit */
+ if (test_bit(TIMER_ASYNC_OFF, &oxu->actions))
+ start_unlink_async(oxu, oxu->async);
+
+ /* oxu could run by timer, without IRQs ... */
+ ehci_work(oxu);
+
+ spin_unlock_irqrestore(&oxu->lock, flags);
+}
+
+/* One-time init, only for memory state.
+ */
+static int oxu_hcd_init(struct usb_hcd *hcd)
+{
+ struct oxu_hcd *oxu = hcd_to_oxu(hcd);
+ u32 temp;
+ int retval;
+ u32 hcc_params;
+
+ spin_lock_init(&oxu->lock);
+
+ init_timer(&oxu->watchdog);
+ oxu->watchdog.function = oxu_watchdog;
+ oxu->watchdog.data = (unsigned long) oxu;
+
+ /*
+ * hw default: 1K periodic list heads, one per frame.
+ * periodic_size can shrink by USBCMD update if hcc_params allows.
+ */
+ oxu->periodic_size = DEFAULT_I_TDPS;
+ retval = ehci_mem_init(oxu, GFP_KERNEL);
+ if (retval < 0)
+ return retval;
+
+ /* controllers may cache some of the periodic schedule ... */
+ hcc_params = readl(&oxu->caps->hcc_params);
+ if (HCC_ISOC_CACHE(hcc_params)) /* full frame cache */
+ oxu->i_thresh = 8;
+ else /* N microframes cached */
+ oxu->i_thresh = 2 + HCC_ISOC_THRES(hcc_params);
+
+ oxu->reclaim = NULL;
+ oxu->reclaim_ready = 0;
+ oxu->next_uframe = -1;
+
+ /*
+ * dedicate a qh for the async ring head, since we couldn't unlink
+ * a 'real' qh without stopping the async schedule [4.8]. use it
+ * as the 'reclamation list head' too.
+ * its dummy is used in hw_alt_next of many tds, to prevent the qh
+ * from automatically advancing to the next td after short reads.
+ */
+ oxu->async->qh_next.qh = NULL;
+ oxu->async->hw_next = QH_NEXT(oxu->async->qh_dma);
+ oxu->async->hw_info1 = cpu_to_le32(QH_HEAD);
+ oxu->async->hw_token = cpu_to_le32(QTD_STS_HALT);
+ oxu->async->hw_qtd_next = EHCI_LIST_END;
+ oxu->async->qh_state = QH_STATE_LINKED;
+ oxu->async->hw_alt_next = QTD_NEXT(oxu->async->dummy->qtd_dma);
+
+ /* clear interrupt enables, set irq latency */
+ if (log2_irq_thresh < 0 || log2_irq_thresh > 6)
+ log2_irq_thresh = 0;
+ temp = 1 << (16 + log2_irq_thresh);
+ if (HCC_CANPARK(hcc_params)) {
+ /* HW default park == 3, on hardware that supports it (like
+ * NVidia and ALI silicon), maximizes throughput on the async
+ * schedule by avoiding QH fetches between transfers.
+ *
+ * With fast usb storage devices and NForce2, "park" seems to
+		 * cause problems: throughput reduction (!), data errors...
+ */
+ if (park) {
+ park = min(park, (unsigned) 3);
+ temp |= CMD_PARK;
+ temp |= park << 8;
+ }
+ oxu_dbg(oxu, "park %d\n", park);
+ }
+ if (HCC_PGM_FRAMELISTLEN(hcc_params)) {
+ /* periodic schedule size can be smaller than default */
+ temp &= ~(3 << 2);
+ temp |= (EHCI_TUNE_FLS << 2);
+ }
+ oxu->command = temp;
+
+ return 0;
+}
+
+/* Called during probe() after chip reset completes.
+ */
+static int oxu_reset(struct usb_hcd *hcd)
+{
+ struct oxu_hcd *oxu = hcd_to_oxu(hcd);
+ int ret;
+
+ spin_lock_init(&oxu->mem_lock);
+ INIT_LIST_HEAD(&oxu->urb_list);
+ oxu->urb_len = 0;
+
+	/* FIXME */
+ hcd->self.controller->dma_mask = 0UL;
+
+ if (oxu->is_otg) {
+ oxu->caps = hcd->regs + OXU_OTG_CAP_OFFSET;
+ oxu->regs = hcd->regs + OXU_OTG_CAP_OFFSET + \
+ HC_LENGTH(readl(&oxu->caps->hc_capbase));
+
+ oxu->mem = hcd->regs + OXU_SPH_MEM;
+ } else {
+ oxu->caps = hcd->regs + OXU_SPH_CAP_OFFSET;
+ oxu->regs = hcd->regs + OXU_SPH_CAP_OFFSET + \
+ HC_LENGTH(readl(&oxu->caps->hc_capbase));
+
+ oxu->mem = hcd->regs + OXU_OTG_MEM;
+ }
+
+ oxu->hcs_params = readl(&oxu->caps->hcs_params);
+ oxu->sbrn = 0x20;
+
+ ret = oxu_hcd_init(hcd);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
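+/* Start the host controller: reset it, program the schedule pointers,
+ * set CMD_RUN and the configure flag, and enable interrupts.
+ */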
+static int oxu_run(struct usb_hcd *hcd)
+{
+ struct oxu_hcd *oxu = hcd_to_oxu(hcd);
+ int retval;
+ u32 temp, hcc_params;
+
+ hcd->uses_new_polling = 1;
+ hcd->poll_rh = 0;
+
+ /* EHCI spec section 4.1 */
+ retval = ehci_reset(oxu);
+ if (retval != 0) {
+ ehci_mem_cleanup(oxu);
+ return retval;
+ }
+ writel(oxu->periodic_dma, &oxu->regs->frame_list);
+ writel((u32) oxu->async->qh_dma, &oxu->regs->async_next);
+
+ /* hcc_params controls whether oxu->regs->segment must (!!!)
+ * be used; it constrains QH/ITD/SITD and QTD locations.
+ * pci_pool consistent memory always uses segment zero.
+ * streaming mappings for I/O buffers, like pci_map_single(),
+ * can return segments above 4GB, if the device allows.
+ *
+ * NOTE: the dma mask is visible through dma_supported(), so
+ * drivers can pass this info along ... like NETIF_F_HIGHDMA,
+ * Scsi_Host.highmem_io, and so forth. It's readonly to all
+ * host side drivers though.
+ */
+ hcc_params = readl(&oxu->caps->hcc_params);
+ if (HCC_64BIT_ADDR(hcc_params))
+ writel(0, &oxu->regs->segment);
+
+ oxu->command &= ~(CMD_LRESET | CMD_IAAD | CMD_PSE |
+ CMD_ASE | CMD_RESET);
+ oxu->command |= CMD_RUN;
+ writel(oxu->command, &oxu->regs->command);
+ dbg_cmd(oxu, "init", oxu->command);
+
+ /*
+ * Start, enabling full USB 2.0 functionality ... usb 1.1 devices
+ * are explicitly handed to companion controller(s), so no TT is
+ * involved with the root hub. (Except where one is integrated,
+ * and there's no companion controller unless maybe for USB OTG.)
+ */
+ hcd->state = HC_STATE_RUNNING;
+ writel(FLAG_CF, &oxu->regs->configured_flag);
+ readl(&oxu->regs->command); /* unblock posted writes */
+
+ temp = HC_VERSION(readl(&oxu->caps->hc_capbase));
+ oxu_info(oxu, "USB %x.%x started, quasi-EHCI %x.%02x, driver %s%s\n",
+ ((oxu->sbrn & 0xf0)>>4), (oxu->sbrn & 0x0f),
+ temp >> 8, temp & 0xff, DRIVER_VERSION,
+ ignore_oc ? ", overcurrent ignored" : "");
+
+ writel(INTR_MASK, &oxu->regs->intr_enable); /* Turn On Interrupts */
+
+ return 0;
+}
+
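+/* Shut the controller down: power off the root hub ports, quiesce and
+ * reset the HC, and release its memory.
+ */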
+static void oxu_stop(struct usb_hcd *hcd)
+{
+ struct oxu_hcd *oxu = hcd_to_oxu(hcd);
+
+ /* Turn off port power on all root hub ports. */
+ ehci_port_power(oxu, 0);
+
+ /* no more interrupts ... */
+ del_timer_sync(&oxu->watchdog);
+
+ spin_lock_irq(&oxu->lock);
+ if (HC_IS_RUNNING(hcd->state))
+ ehci_quiesce(oxu);
+
+ ehci_reset(oxu);
+ writel(0, &oxu->regs->intr_enable);
+ spin_unlock_irq(&oxu->lock);
+
+ /* let companion controllers work when we aren't */
+ writel(0, &oxu->regs->configured_flag);
+
+ /* root hub is shut down separately (first, when possible) */
+ spin_lock_irq(&oxu->lock);
+ if (oxu->async)
+ ehci_work(oxu);
+ spin_unlock_irq(&oxu->lock);
+ ehci_mem_cleanup(oxu);
+
+ dbg_status(oxu, "oxu_stop completed", readl(&oxu->regs->status));
+}
+
+/* Kick in for silicon on any bus (not just pci, etc).
+ * This forcibly disables dma and IRQs, helping kexec and other cases
+ * where the next system software may expect clean state.
+ */
+static void oxu_shutdown(struct usb_hcd *hcd)
+{
+ struct oxu_hcd *oxu = hcd_to_oxu(hcd);
+
+ (void) ehci_halt(oxu);
+ ehci_turn_off_all_ports(oxu);
+
+ /* make BIOS/etc use companion controller during reboot */
+ writel(0, &oxu->regs->configured_flag);
+
+ /* unblock posted writes */
+ readl(&oxu->regs->configured_flag);
+}
+
+/* Non-error returns are a promise to giveback() the urb later;
+ * we drop ownership so the next owner (or urb unlink) can get it.
+ *
+ * urb + dev is in hcd.self.controller.urb_list
+ * we're queueing TDs onto software and hardware lists
+ *
+ * hcd-specific init for hcpriv hasn't been done yet
+ *
+ * NOTE: control, bulk, and interrupt share the same code to append TDs
+ * to a (possibly active) QH, and the same QH scanning code.
+ */
+static int __oxu_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
+ gfp_t mem_flags)
+{
+ struct oxu_hcd *oxu = hcd_to_oxu(hcd);
+ struct list_head qtd_list;
+
+ INIT_LIST_HEAD(&qtd_list);
+
+ switch (usb_pipetype(urb->pipe)) {
+ case PIPE_CONTROL:
+ case PIPE_BULK:
+ default:
+ if (!qh_urb_transaction(oxu, urb, &qtd_list, mem_flags))
+ return -ENOMEM;
+ return submit_async(oxu, urb, &qtd_list, mem_flags);
+
+ case PIPE_INTERRUPT:
+ if (!qh_urb_transaction(oxu, urb, &qtd_list, mem_flags))
+ return -ENOMEM;
+ return intr_submit(oxu, urb, &qtd_list, mem_flags);
+
+ case PIPE_ISOCHRONOUS:
+ if (urb->dev->speed == USB_SPEED_HIGH)
+ return itd_submit(oxu, urb, mem_flags);
+ else
+ return sitd_submit(oxu, urb, mem_flags);
+ }
+}
+
+/* This function breaks URBs with large transfer buffers into smaller
+ * (4096-byte) micro URBs and processes them in sequence.
+ */
+static int oxu_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
+ gfp_t mem_flags)
+{
+ struct oxu_hcd *oxu = hcd_to_oxu(hcd);
+ int num, rem;
+ int transfer_buffer_length;
+ void *transfer_buffer;
+ struct urb *murb;
+ int i, ret;
+
+ /* If not bulk pipe just enqueue the URB */
+ if (!usb_pipebulk(urb->pipe))
+ return __oxu_urb_enqueue(hcd, urb, mem_flags);
+
+ /* Otherwise we should verify the USB transfer buffer size! */
+ transfer_buffer = urb->transfer_buffer;
+ transfer_buffer_length = urb->transfer_buffer_length;
+
+ num = urb->transfer_buffer_length / 4096;
+ rem = urb->transfer_buffer_length % 4096;
+ if (rem != 0)
+ num++;
+
+ /* If URB is smaller than 4096 bytes just enqueue it! */
+ if (num == 1)
+ return __oxu_urb_enqueue(hcd, urb, mem_flags);
+
+	/* Ok, we have more work to do! :) */
+
+ for (i = 0; i < num - 1; i++) {
+		/* Get a free micro URB; poll till one is received */
+
+ do {
+ murb = (struct urb *) oxu_murb_alloc(oxu);
+ if (!murb)
+ schedule();
+ } while (!murb);
+
+		/* Copy the urb */
+ memcpy(murb, urb, sizeof(struct urb));
+
+ murb->transfer_buffer_length = 4096;
+ murb->transfer_buffer = transfer_buffer + i * 4096;
+
+		/* A NULL complete pointer encodes that this is a micro urb */
+ murb->complete = NULL;
+
+ ((struct oxu_murb *) murb)->main = urb;
+ ((struct oxu_murb *) murb)->last = 0;
+
+		/* Retry until the urb is accepted; resources may be
+		 * temporarily exhausted.
+		 */
+ do {
+ ret = __oxu_urb_enqueue(hcd, murb, mem_flags);
+ if (ret)
+ schedule();
+ } while (ret);
+ }
+
+ /* Last urb requires special handling */
+
+	/* Get a free micro URB; poll till one is received */
+ do {
+ murb = (struct urb *) oxu_murb_alloc(oxu);
+ if (!murb)
+ schedule();
+ } while (!murb);
+
+	/* Copy the urb */
+ memcpy(murb, urb, sizeof(struct urb));
+
+ murb->transfer_buffer_length = rem > 0 ? rem : 4096;
+ murb->transfer_buffer = transfer_buffer + (num - 1) * 4096;
+
+	/* A NULL complete pointer encodes that this is a micro urb */
+ murb->complete = NULL;
+
+ ((struct oxu_murb *) murb)->main = urb;
+ ((struct oxu_murb *) murb)->last = 1;
+
+ do {
+ ret = __oxu_urb_enqueue(hcd, murb, mem_flags);
+ if (ret)
+ schedule();
+ } while (ret);
+
+ return ret;
+}
+
+/* Remove from hardware lists.
+ * Completions normally happen asynchronously
+ */
+static int oxu_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+{
+ struct oxu_hcd *oxu = hcd_to_oxu(hcd);
+ struct ehci_qh *qh;
+ unsigned long flags;
+
+ spin_lock_irqsave(&oxu->lock, flags);
+ switch (usb_pipetype(urb->pipe)) {
+ case PIPE_CONTROL:
+ case PIPE_BULK:
+ default:
+ qh = (struct ehci_qh *) urb->hcpriv;
+ if (!qh)
+ break;
+ unlink_async(oxu, qh);
+ break;
+
+ case PIPE_INTERRUPT:
+ qh = (struct ehci_qh *) urb->hcpriv;
+ if (!qh)
+ break;
+ switch (qh->qh_state) {
+ case QH_STATE_LINKED:
+ intr_deschedule(oxu, qh);
+ /* FALL THROUGH */
+ case QH_STATE_IDLE:
+ qh_completions(oxu, qh);
+ break;
+ default:
+ oxu_dbg(oxu, "bogus qh %p state %d\n",
+ qh, qh->qh_state);
+ goto done;
+ }
+
+ /* reschedule QH iff another request is queued */
+ if (!list_empty(&qh->qtd_list)
+ && HC_IS_RUNNING(hcd->state)) {
+ int status;
+
+ status = qh_schedule(oxu, qh);
+ spin_unlock_irqrestore(&oxu->lock, flags);
+
+ if (status != 0) {
+ /* shouldn't happen often, but ...
+ * FIXME kill those tds' urbs
+ */
+ err("can't reschedule qh %p, err %d",
+ qh, status);
+ }
+ return status;
+ }
+ break;
+ }
+done:
+ spin_unlock_irqrestore(&oxu->lock, flags);
+ return 0;
+}
+
+/* Bulk qh holds the data toggle */
+static void oxu_endpoint_disable(struct usb_hcd *hcd,
+ struct usb_host_endpoint *ep)
+{
+ struct oxu_hcd *oxu = hcd_to_oxu(hcd);
+ unsigned long flags;
+ struct ehci_qh *qh, *tmp;
+
+ /* ASSERT: any requests/urbs are being unlinked */
+ /* ASSERT: nobody can be submitting urbs for this any more */
+
+rescan:
+ spin_lock_irqsave(&oxu->lock, flags);
+ qh = ep->hcpriv;
+ if (!qh)
+ goto done;
+
+ /* endpoints can be iso streams. for now, we don't
+ * accelerate iso completions ... so spin a while.
+ */
+ if (qh->hw_info1 == 0) {
+ oxu_vdbg(oxu, "iso delay\n");
+ goto idle_timeout;
+ }
+
+ if (!HC_IS_RUNNING(hcd->state))
+ qh->qh_state = QH_STATE_IDLE;
+ switch (qh->qh_state) {
+ case QH_STATE_LINKED:
+ for (tmp = oxu->async->qh_next.qh;
+ tmp && tmp != qh;
+ tmp = tmp->qh_next.qh)
+ continue;
+ /* periodic qh self-unlinks on empty */
+ if (!tmp)
+ goto nogood;
+ unlink_async(oxu, qh);
+ /* FALL THROUGH */
+ case QH_STATE_UNLINK: /* wait for hw to finish? */
+idle_timeout:
+ spin_unlock_irqrestore(&oxu->lock, flags);
+ schedule_timeout_uninterruptible(1);
+ goto rescan;
+ case QH_STATE_IDLE: /* fully unlinked */
+ if (list_empty(&qh->qtd_list)) {
+ qh_put(qh);
+ break;
+ }
+ /* else FALL THROUGH */
+ default:
+nogood:
+ /* caller was supposed to have unlinked any requests;
+ * that's not our job. just leak this memory.
+ */
+ oxu_err(oxu, "qh %p (#%02x) state %d%s\n",
+ qh, ep->desc.bEndpointAddress, qh->qh_state,
+ list_empty(&qh->qtd_list) ? "" : "(has tds)");
+ break;
+ }
+ ep->hcpriv = NULL;
+done:
+ spin_unlock_irqrestore(&oxu->lock, flags);
+ return;
+}
+
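+/* Return the current frame number (microframe index divided by 8). */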
+static int oxu_get_frame(struct usb_hcd *hcd)
+{
+ struct oxu_hcd *oxu = hcd_to_oxu(hcd);
+
+ return (readl(&oxu->regs->frame_index) >> 3) %
+ oxu->periodic_size;
+}
+
+/* Build "status change" packet (one or two bytes) from HC registers */
+static int oxu_hub_status_data(struct usb_hcd *hcd, char *buf)
+{
+ struct oxu_hcd *oxu = hcd_to_oxu(hcd);
+ u32 temp, mask, status = 0;
+ int ports, i, retval = 1;
+ unsigned long flags;
+
+ /* if !USB_SUSPEND, root hub timers won't get shut down ... */
+ if (!HC_IS_RUNNING(hcd->state))
+ return 0;
+
+ /* init status to no-changes */
+ buf[0] = 0;
+ ports = HCS_N_PORTS(oxu->hcs_params);
+ if (ports > 7) {
+ buf[1] = 0;
+ retval++;
+ }
+
+ /* Some boards (mostly VIA?) report bogus overcurrent indications,
+ * causing massive log spam unless we completely ignore them. It
+	 * may be relevant that VIA VT8235 controllers, where PORT_POWER is
+ * always set, seem to clear PORT_OCC and PORT_CSC when writing to
+ * PORT_POWER; that's surprising, but maybe within-spec.
+ */
+ if (!ignore_oc)
+ mask = PORT_CSC | PORT_PEC | PORT_OCC;
+ else
+ mask = PORT_CSC | PORT_PEC;
+
+ /* no hub change reports (bit 0) for now (power, ...) */
+
+ /* port N changes (bit N)? */
+ spin_lock_irqsave(&oxu->lock, flags);
+ for (i = 0; i < ports; i++) {
+ temp = readl(&oxu->regs->port_status[i]);
+
+ /*
+ * Return status information even for ports with OWNER set.
+ * Otherwise khubd wouldn't see the disconnect event when a
+ * high-speed device is switched over to the companion
+ * controller by the user.
+ */
+
+ if (!(temp & PORT_CONNECT))
+ oxu->reset_done[i] = 0;
+ if ((temp & mask) != 0 || ((temp & PORT_RESUME) != 0 &&
+ time_after_eq(jiffies, oxu->reset_done[i]))) {
+ if (i < 7)
+ buf[0] |= 1 << (i + 1);
+ else
+ buf[1] |= 1 << (i - 7);
+ status = STS_PCD;
+ }
+ }
+ /* FIXME autosuspend idle root hubs */
+ spin_unlock_irqrestore(&oxu->lock, flags);
+ return status ? retval : 0;
+}
+
+/* Returns the speed of a device attached to a port on the root hub. */
+static inline unsigned int oxu_port_speed(struct oxu_hcd *oxu,
+ unsigned int portsc)
+{
+ switch ((portsc >> 26) & 3) {
+ case 0:
+ return 0;
+ case 1:
+ return 1 << USB_PORT_FEAT_LOWSPEED;
+ case 2:
+ default:
+ return 1 << USB_PORT_FEAT_HIGHSPEED;
+ }
+}
+
+#define PORT_WAKE_BITS (PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E)
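+/* Handle root hub class requests: hub/port feature set/clear and
+ * status queries.
+ */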
+static int oxu_hub_control(struct usb_hcd *hcd, u16 typeReq,
+ u16 wValue, u16 wIndex, char *buf, u16 wLength)
+{
+ struct oxu_hcd *oxu = hcd_to_oxu(hcd);
+ int ports = HCS_N_PORTS(oxu->hcs_params);
+ u32 __iomem *status_reg = &oxu->regs->port_status[wIndex - 1];
+ u32 temp, status;
+ unsigned long flags;
+ int retval = 0;
+ unsigned selector;
+
+ /*
+ * FIXME: support SetPortFeatures USB_PORT_FEAT_INDICATOR.
+ * HCS_INDICATOR may say we can change LEDs to off/amber/green.
+ * (track current state ourselves) ... blink for diagnostics,
+ * power, "this is the one", etc. EHCI spec supports this.
+ */
+
+ spin_lock_irqsave(&oxu->lock, flags);
+ switch (typeReq) {
+ case ClearHubFeature:
+ switch (wValue) {
+ case C_HUB_LOCAL_POWER:
+ case C_HUB_OVER_CURRENT:
+ /* no hub-wide feature/status flags */
+ break;
+ default:
+ goto error;
+ }
+ break;
+ case ClearPortFeature:
+ if (!wIndex || wIndex > ports)
+ goto error;
+ wIndex--;
+ temp = readl(status_reg);
+
+ /*
+ * Even if OWNER is set, so the port is owned by the
+ * companion controller, khubd needs to be able to clear
+ * the port-change status bits (especially
+ * USB_PORT_FEAT_C_CONNECTION).
+ */
+
+ switch (wValue) {
+ case USB_PORT_FEAT_ENABLE:
+ writel(temp & ~PORT_PE, status_reg);
+ break;
+ case USB_PORT_FEAT_C_ENABLE:
+ writel((temp & ~PORT_RWC_BITS) | PORT_PEC, status_reg);
+ break;
+ case USB_PORT_FEAT_SUSPEND:
+ if (temp & PORT_RESET)
+ goto error;
+ if (temp & PORT_SUSPEND) {
+ if ((temp & PORT_PE) == 0)
+ goto error;
+ /* resume signaling for 20 msec */
+ temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS);
+ writel(temp | PORT_RESUME, status_reg);
+ oxu->reset_done[wIndex] = jiffies
+ + msecs_to_jiffies(20);
+ }
+ break;
+ case USB_PORT_FEAT_C_SUSPEND:
+ /* we auto-clear this feature */
+ break;
+ case USB_PORT_FEAT_POWER:
+ if (HCS_PPC(oxu->hcs_params))
+ writel(temp & ~(PORT_RWC_BITS | PORT_POWER),
+ status_reg);
+ break;
+ case USB_PORT_FEAT_C_CONNECTION:
+ writel((temp & ~PORT_RWC_BITS) | PORT_CSC, status_reg);
+ break;
+ case USB_PORT_FEAT_C_OVER_CURRENT:
+ writel((temp & ~PORT_RWC_BITS) | PORT_OCC, status_reg);
+ break;
+ case USB_PORT_FEAT_C_RESET:
+ /* GetPortStatus clears reset */
+ break;
+ default:
+ goto error;
+ }
+ readl(&oxu->regs->command); /* unblock posted write */
+ break;
+ case GetHubDescriptor:
+ ehci_hub_descriptor(oxu, (struct usb_hub_descriptor *)
+ buf);
+ break;
+ case GetHubStatus:
+ /* no hub-wide feature/status flags */
+ memset(buf, 0, 4);
+ break;
+ case GetPortStatus:
+ if (!wIndex || wIndex > ports)
+ goto error;
+ wIndex--;
+ status = 0;
+ temp = readl(status_reg);
+
+ /* wPortChange bits */
+ if (temp & PORT_CSC)
+ status |= 1 << USB_PORT_FEAT_C_CONNECTION;
+ if (temp & PORT_PEC)
+ status |= 1 << USB_PORT_FEAT_C_ENABLE;
+ if ((temp & PORT_OCC) && !ignore_oc)
+ status |= 1 << USB_PORT_FEAT_C_OVER_CURRENT;
+
+ /* whoever resumes must GetPortStatus to complete it!! */
+ if (temp & PORT_RESUME) {
+
+ /* Remote Wakeup received? */
+ if (!oxu->reset_done[wIndex]) {
+ /* resume signaling for 20 msec */
+ oxu->reset_done[wIndex] = jiffies
+ + msecs_to_jiffies(20);
+ /* check the port again */
+ mod_timer(&oxu_to_hcd(oxu)->rh_timer,
+ oxu->reset_done[wIndex]);
+ }
+
+ /* resume completed? */
+ else if (time_after_eq(jiffies,
+ oxu->reset_done[wIndex])) {
+ status |= 1 << USB_PORT_FEAT_C_SUSPEND;
+ oxu->reset_done[wIndex] = 0;
+
+ /* stop resume signaling */
+ temp = readl(status_reg);
+ writel(temp & ~(PORT_RWC_BITS | PORT_RESUME),
+ status_reg);
+ retval = handshake(oxu, status_reg,
+ PORT_RESUME, 0, 2000 /* 2msec */);
+ if (retval != 0) {
+ oxu_err(oxu,
+ "port %d resume error %d\n",
+ wIndex + 1, retval);
+ goto error;
+ }
+ temp &= ~(PORT_SUSPEND|PORT_RESUME|(3<<10));
+ }
+ }
+
+ /* whoever resets must GetPortStatus to complete it!! */
+ if ((temp & PORT_RESET)
+ && time_after_eq(jiffies,
+ oxu->reset_done[wIndex])) {
+ status |= 1 << USB_PORT_FEAT_C_RESET;
+ oxu->reset_done[wIndex] = 0;
+
+ /* force reset to complete */
+ writel(temp & ~(PORT_RWC_BITS | PORT_RESET),
+ status_reg);
+ /* REVISIT: some hardware needs 550+ usec to clear
+ * this bit; seems too long to spin routinely...
+ */
+ retval = handshake(oxu, status_reg,
+ PORT_RESET, 0, 750);
+ if (retval != 0) {
+ oxu_err(oxu, "port %d reset error %d\n",
+ wIndex + 1, retval);
+ goto error;
+ }
+
+ /* see what we found out */
+ temp = check_reset_complete(oxu, wIndex, status_reg,
+ readl(status_reg));
+ }
+
+ /* transfer dedicated ports to the companion hc */
+ if ((temp & PORT_CONNECT) &&
+ test_bit(wIndex, &oxu->companion_ports)) {
+ temp &= ~PORT_RWC_BITS;
+ temp |= PORT_OWNER;
+ writel(temp, status_reg);
+ oxu_dbg(oxu, "port %d --> companion\n", wIndex + 1);
+ temp = readl(status_reg);
+ }
+
+ /*
+ * Even if OWNER is set, there's no harm letting khubd
+ * see the wPortStatus values (they should all be 0 except
+ * for PORT_POWER anyway).
+ */
+
+ if (temp & PORT_CONNECT) {
+ status |= 1 << USB_PORT_FEAT_CONNECTION;
+ /* status may be from integrated TT */
+ status |= oxu_port_speed(oxu, temp);
+ }
+ if (temp & PORT_PE)
+ status |= 1 << USB_PORT_FEAT_ENABLE;
+ if (temp & (PORT_SUSPEND|PORT_RESUME))
+ status |= 1 << USB_PORT_FEAT_SUSPEND;
+ if (temp & PORT_OC)
+ status |= 1 << USB_PORT_FEAT_OVER_CURRENT;
+ if (temp & PORT_RESET)
+ status |= 1 << USB_PORT_FEAT_RESET;
+ if (temp & PORT_POWER)
+ status |= 1 << USB_PORT_FEAT_POWER;
+
+#ifndef OXU_VERBOSE_DEBUG
+ if (status & ~0xffff) /* only if wPortChange is interesting */
+#endif
+ dbg_port(oxu, "GetStatus", wIndex + 1, temp);
+ put_unaligned(cpu_to_le32(status), (__le32 *) buf);
+ break;
+ case SetHubFeature:
+ switch (wValue) {
+ case C_HUB_LOCAL_POWER:
+ case C_HUB_OVER_CURRENT:
+ /* no hub-wide feature/status flags */
+ break;
+ default:
+ goto error;
+ }
+ break;
+ case SetPortFeature:
+ selector = wIndex >> 8;
+ wIndex &= 0xff;
+ if (!wIndex || wIndex > ports)
+ goto error;
+ wIndex--;
+ temp = readl(status_reg);
+ if (temp & PORT_OWNER)
+ break;
+
+ temp &= ~PORT_RWC_BITS;
+ switch (wValue) {
+ case USB_PORT_FEAT_SUSPEND:
+ if ((temp & PORT_PE) == 0
+ || (temp & PORT_RESET) != 0)
+ goto error;
+ if (device_may_wakeup(&hcd->self.root_hub->dev))
+ temp |= PORT_WAKE_BITS;
+ writel(temp | PORT_SUSPEND, status_reg);
+ break;
+ case USB_PORT_FEAT_POWER:
+ if (HCS_PPC(oxu->hcs_params))
+ writel(temp | PORT_POWER, status_reg);
+ break;
+ case USB_PORT_FEAT_RESET:
+ if (temp & PORT_RESUME)
+ goto error;
+ /* line status bits may report this as low speed,
+ * which can be fine if this root hub has a
+ * transaction translator built in.
+ */
+ oxu_vdbg(oxu, "port %d reset\n", wIndex + 1);
+ temp |= PORT_RESET;
+ temp &= ~PORT_PE;
+
+ /*
+ * caller must wait, then call GetPortStatus
+ * usb 2.0 spec says 50 ms resets on root
+ */
+ oxu->reset_done[wIndex] = jiffies
+ + msecs_to_jiffies(50);
+ writel(temp, status_reg);
+ break;
+
+ /* For downstream facing ports (these): one hub port is put
+ * into test mode according to USB2 11.24.2.13, then the hub
+ * must be reset (which for root hub now means rmmod+modprobe,
+ * or else system reboot). See EHCI 2.3.9 and 4.14 for info
+ * about the EHCI-specific stuff.
+ */
+ case USB_PORT_FEAT_TEST:
+ if (!selector || selector > 5)
+ goto error;
+ ehci_quiesce(oxu);
+ ehci_halt(oxu);
+ temp |= selector << 16;
+ writel(temp, status_reg);
+ break;
+
+ default:
+ goto error;
+ }
+ readl(&oxu->regs->command); /* unblock posted writes */
+ break;
+
+ default:
+error:
+ /* "stall" on error */
+ retval = -EPIPE;
+ }
+ spin_unlock_irqrestore(&oxu->lock, flags);
+ return retval;
+}
+
+#ifdef CONFIG_PM
+
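+/* Suspend the root hub: quiesce the schedules, suspend each enabled port
+ * (enabling remote wakeup where allowed), and halt the HC.
+ */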
+static int oxu_bus_suspend(struct usb_hcd *hcd)
+{
+ struct oxu_hcd *oxu = hcd_to_oxu(hcd);
+ int port;
+ int mask;
+
+ oxu_dbg(oxu, "suspend root hub\n");
+
+ if (time_before(jiffies, oxu->next_statechange))
+ msleep(5);
+
+ port = HCS_N_PORTS(oxu->hcs_params);
+ spin_lock_irq(&oxu->lock);
+
+ /* stop schedules, clean any completed work */
+ if (HC_IS_RUNNING(hcd->state)) {
+ ehci_quiesce(oxu);
+ hcd->state = HC_STATE_QUIESCING;
+ }
+ oxu->command = readl(&oxu->regs->command);
+ if (oxu->reclaim)
+ oxu->reclaim_ready = 1;
+ ehci_work(oxu);
+
+ /* Unlike other USB host controller types, EHCI doesn't have
+ * any notion of "global" or bus-wide suspend. The driver has
+ * to manually suspend all the active unsuspended ports, and
+ * then manually resume them in the bus_resume() routine.
+ */
+ oxu->bus_suspended = 0;
+ while (port--) {
+ u32 __iomem *reg = &oxu->regs->port_status[port];
+ u32 t1 = readl(reg) & ~PORT_RWC_BITS;
+ u32 t2 = t1;
+
+ /* keep track of which ports we suspend */
+ if ((t1 & PORT_PE) && !(t1 & PORT_OWNER) &&
+ !(t1 & PORT_SUSPEND)) {
+ t2 |= PORT_SUSPEND;
+ set_bit(port, &oxu->bus_suspended);
+ }
+
+ /* enable remote wakeup on all ports */
+ if (device_may_wakeup(&hcd->self.root_hub->dev))
+ t2 |= PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E;
+ else
+ t2 &= ~(PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E);
+
+ if (t1 != t2) {
+ oxu_vdbg(oxu, "port %d, %08x -> %08x\n",
+ port + 1, t1, t2);
+ writel(t2, reg);
+ }
+ }
+
+ /* turn off now-idle HC */
+ del_timer_sync(&oxu->watchdog);
+ ehci_halt(oxu);
+ hcd->state = HC_STATE_SUSPENDED;
+
+ /* allow remote wakeup */
+ mask = INTR_MASK;
+ if (!device_may_wakeup(&hcd->self.root_hub->dev))
+ mask &= ~STS_PCD;
+ writel(mask, &oxu->regs->intr_enable);
+ readl(&oxu->regs->intr_enable);
+
+ oxu->next_statechange = jiffies + msecs_to_jiffies(10);
+ spin_unlock_irq(&oxu->lock);
+ return 0;
+}
+
+/* Caller has locked the root hub, and should reset/reinit on error */
+static int oxu_bus_resume(struct usb_hcd *hcd)
+{
+ struct oxu_hcd *oxu = hcd_to_oxu(hcd);
+ u32 temp;
+ int i;
+
+ if (time_before(jiffies, oxu->next_statechange))
+ msleep(5);
+ spin_lock_irq(&oxu->lock);
+
+	/* Ideally we've got a real resume here, and no port's power
+ * was lost. (For PCI, that means Vaux was maintained.) But we
+ * could instead be restoring a swsusp snapshot -- so that BIOS was
+ * the last user of the controller, not reset/pm hardware keeping
+ * state we gave to it.
+ */
+ temp = readl(&oxu->regs->intr_enable);
+ oxu_dbg(oxu, "resume root hub%s\n", temp ? "" : " after power loss");
+
+ /* at least some APM implementations will try to deliver
+ * IRQs right away, so delay them until we're ready.
+ */
+ writel(0, &oxu->regs->intr_enable);
+
+ /* re-init operational registers */
+ writel(0, &oxu->regs->segment);
+ writel(oxu->periodic_dma, &oxu->regs->frame_list);
+ writel((u32) oxu->async->qh_dma, &oxu->regs->async_next);
+
+ /* restore CMD_RUN, framelist size, and irq threshold */
+ writel(oxu->command, &oxu->regs->command);
+
+ /* Some controller/firmware combinations need a delay during which
+ * they set up the port statuses. See Bugzilla #8190. */
+ mdelay(8);
+
+ /* manually resume the ports we suspended during bus_suspend() */
+ i = HCS_N_PORTS(oxu->hcs_params);
+ while (i--) {
+ temp = readl(&oxu->regs->port_status[i]);
+ temp &= ~(PORT_RWC_BITS
+ | PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E);
+ if (test_bit(i, &oxu->bus_suspended) && (temp & PORT_SUSPEND)) {
+ oxu->reset_done[i] = jiffies + msecs_to_jiffies(20);
+ temp |= PORT_RESUME;
+ }
+ writel(temp, &oxu->regs->port_status[i]);
+ }
+ i = HCS_N_PORTS(oxu->hcs_params);
+ mdelay(20);
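+ /* after the 20 msec of resume signalling, stop driving PORT_RESUME */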
+ while (i--) {
+ temp = readl(&oxu->regs->port_status[i]);
+ if (test_bit(i, &oxu->bus_suspended) && (temp & PORT_SUSPEND)) {
+ temp &= ~(PORT_RWC_BITS | PORT_RESUME);
+ writel(temp, &oxu->regs->port_status[i]);
+ oxu_vdbg(oxu, "resumed port %d\n", i + 1);
+ }
+ }
+ (void) readl(&oxu->regs->command);
+
+ /* maybe re-activate the schedule(s) */
+ temp = 0;
+ if (oxu->async->qh_next.qh)
+ temp |= CMD_ASE;
+ if (oxu->periodic_sched)
+ temp |= CMD_PSE;
+ if (temp) {
+ oxu->command |= temp;
+ writel(oxu->command, &oxu->regs->command);
+ }
+
+ oxu->next_statechange = jiffies + msecs_to_jiffies(5);
+ hcd->state = HC_STATE_RUNNING;
+
+ /* Now we can safely re-enable irqs */
+ writel(INTR_MASK, &oxu->regs->intr_enable);
+
+ spin_unlock_irq(&oxu->lock);
+ return 0;
+}
+
+#else
+
+static int oxu_bus_suspend(struct usb_hcd *hcd)
+{
+ return 0;
+}
+
+static int oxu_bus_resume(struct usb_hcd *hcd)
+{
+ return 0;
+}
+
+#endif /* CONFIG_PM */
+
+static const struct hc_driver oxu_hc_driver = {
+ .description = "oxu210hp_hcd",
+ .product_desc = "oxu210hp HCD",
+ .hcd_priv_size = sizeof(struct oxu_hcd),
+
+ /*
+ * Generic hardware linkage
+ */
+ .irq = oxu_irq,
+ .flags = HCD_MEMORY | HCD_USB2,
+
+ /*
+ * Basic lifecycle operations
+ */
+ .reset = oxu_reset,
+ .start = oxu_run,
+ .stop = oxu_stop,
+ .shutdown = oxu_shutdown,
+
+ /*
+ * Managing i/o requests and associated device resources
+ */
+ .urb_enqueue = oxu_urb_enqueue,
+ .urb_dequeue = oxu_urb_dequeue,
+ .endpoint_disable = oxu_endpoint_disable,
+
+ /*
+ * Scheduling support
+ */
+ .get_frame_number = oxu_get_frame,
+
+ /*
+ * Root hub support
+ */
+ .hub_status_data = oxu_hub_status_data,
+ .hub_control = oxu_hub_control,
+ .bus_suspend = oxu_bus_suspend,
+ .bus_resume = oxu_bus_resume,
+};
+
+/*
+ * Module stuff
+ */
+
+static void oxu_configuration(struct platform_device *pdev, void *base)
+{
+ u32 tmp;
+
+ /* Initialize top level registers.
+ * First write ever
+ */
+ oxu_writel(base, OXU_HOSTIFCONFIG, 0x0000037D);
+ oxu_writel(base, OXU_SOFTRESET, OXU_SRESET);
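+ /* re-apply the host interface configuration after the soft reset */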
+ oxu_writel(base, OXU_HOSTIFCONFIG, 0x0000037D);
+
+ tmp = oxu_readl(base, OXU_PIOBURSTREADCTRL);
+ oxu_writel(base, OXU_PIOBURSTREADCTRL, tmp | 0x0040);
+
+ oxu_writel(base, OXU_ASO, OXU_SPHPOEN | OXU_OVRCCURPUPDEN |
+ OXU_COMPARATOR | OXU_ASO_OP);
+
+ tmp = oxu_readl(base, OXU_CLKCTRL_SET);
+ oxu_writel(base, OXU_CLKCTRL_SET, tmp | OXU_SYSCLKEN | OXU_USBOTGCLKEN);
+
+ /* Clear all top interrupt enable */
+ oxu_writel(base, OXU_CHIPIRQEN_CLR, 0xff);
+
+ /* Clear all top interrupt status */
+ oxu_writel(base, OXU_CHIPIRQSTATUS, 0xff);
+
+ /* Enable all needed top-level interrupts except the OTG SPH core */
+ oxu_writel(base, OXU_CHIPIRQEN_SET, OXU_USBSPHLPWUI | OXU_USBOTGLPWUI);
+}
+
+static int oxu_verify_id(struct platform_device *pdev, void *base)
+{
+ u32 id;
+ char *bo[] = {
+ "reserved",
+ "128-pin LQFP",
+ "84-pin TFBGA",
+ "reserved",
+ };
+
+ /* Read controller signature register to find a match */
+ id = oxu_readl(base, OXU_DEVICEID);
+ dev_info(&pdev->dev, "device ID %x\n", id);
+ if ((id & OXU_REV_MASK) != (OXU_REV_2100 << OXU_REV_SHIFT))
+ return -1;
+
+ dev_info(&pdev->dev, "found device %x %s (%04x:%04x)\n",
+ id >> OXU_REV_SHIFT,
+ bo[(id & OXU_BO_MASK) >> OXU_BO_SHIFT],
+ (id & OXU_MAJ_REV_MASK) >> OXU_MAJ_REV_SHIFT,
+ (id & OXU_MIN_REV_MASK) >> OXU_MIN_REV_SHIFT);
+
+ return 0;
+}
+
+static const struct hc_driver oxu_hc_driver;
+static struct usb_hcd *oxu_create(struct platform_device *pdev,
+ unsigned long memstart, unsigned long memlen,
+ void *base, int irq, int otg)
+{
+ struct device *dev = &pdev->dev;
+
+ struct usb_hcd *hcd;
+ struct oxu_hcd *oxu;
+ int ret;
+
+ /* Set endian mode and host mode */
+ oxu_writel(base + (otg ? OXU_OTG_CORE_OFFSET : OXU_SPH_CORE_OFFSET),
+ OXU_USBMODE,
+ OXU_CM_HOST_ONLY | OXU_ES_LITTLE | OXU_VBPS);
+
+ hcd = usb_create_hcd(&oxu_hc_driver, dev,
+ otg ? "oxu210hp_otg" : "oxu210hp_sph");
+ if (!hcd)
+ return ERR_PTR(-ENOMEM);
+
+ hcd->rsrc_start = memstart;
+ hcd->rsrc_len = memlen;
+ hcd->regs = base;
+ hcd->irq = irq;
+ hcd->state = HC_STATE_HALT;
+
+ oxu = hcd_to_oxu(hcd);
+ oxu->is_otg = otg;
+
+ ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ return hcd;
+}
+
+static int oxu_init(struct platform_device *pdev,
+ unsigned long memstart, unsigned long memlen,
+ void *base, int irq)
+{
+ struct oxu_info *info = platform_get_drvdata(pdev);
+ struct usb_hcd *hcd;
+ int ret;
+
+ /* First time configuration at start up */
+ oxu_configuration(pdev, base);
+
+ ret = oxu_verify_id(pdev, base);
+ if (ret) {
+ dev_err(&pdev->dev, "no devices found!\n");
+ return -ENODEV;
+ }
+
+ /* Create the OTG controller */
+ hcd = oxu_create(pdev, memstart, memlen, base, irq, 1);
+ if (IS_ERR(hcd)) {
+ dev_err(&pdev->dev, "cannot create OTG controller!\n");
+ ret = PTR_ERR(hcd);
+ goto error_create_otg;
+ }
+ info->hcd[0] = hcd;
+
+ /* Create the SPH host controller */
+ hcd = oxu_create(pdev, memstart, memlen, base, irq, 0);
+ if (IS_ERR(hcd)) {
+ dev_err(&pdev->dev, "cannot create SPH controller!\n");
+ ret = PTR_ERR(hcd);
+ goto error_create_sph;
+ }
+ info->hcd[1] = hcd;
+
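+ /* Enable the OTG and SPH top-level interrupts (OXU_USBOTGI | OXU_USBSPHI) */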
+ oxu_writel(base, OXU_CHIPIRQEN_SET,
+ oxu_readl(base, OXU_CHIPIRQEN_SET) | 3);
+
+ return 0;
+
+error_create_sph:
+ usb_remove_hcd(info->hcd[0]);
+ usb_put_hcd(info->hcd[0]);
+
+error_create_otg:
+ return ret;
+}
+
+static int oxu_drv_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ void *base;
+ unsigned long memstart, memlen;
+ int irq, ret;
+ struct oxu_info *info;
+
+ if (usb_disabled())
+ return -ENODEV;
+
+ /*
+ * Get the platform resources
+ */
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ dev_err(&pdev->dev,
+ "no IRQ! Check %s setup!\n", dev_name(&pdev->dev));
+ return -ENODEV;
+ }
+ irq = res->start;
+ dev_dbg(&pdev->dev, "IRQ resource %d\n", irq);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "no registers address! Check %s setup!\n",
+ dev_name(&pdev->dev));
+ return -ENODEV;
+ }
+ memstart = res->start;
+ memlen = res->end - res->start + 1;
+ dev_dbg(&pdev->dev, "MEM resource %lx-%lx\n", memstart, memlen);
+ if (!request_mem_region(memstart, memlen,
+ oxu_hc_driver.description)) {
+ dev_dbg(&pdev->dev, "memory area already in use\n");
+ return -EBUSY;
+ }
+
+ ret = set_irq_type(irq, IRQF_TRIGGER_FALLING);
+ if (ret) {
+ dev_err(&pdev->dev, "error setting irq type\n");
+ ret = -EFAULT;
+ goto error_set_irq_type;
+ }
+
+ base = ioremap(memstart, memlen);
+ if (!base) {
+ dev_dbg(&pdev->dev, "error mapping memory\n");
+ ret = -EFAULT;
+ goto error_ioremap;
+ }
+
+ /* Allocate a driver data struct to hold useful info for both
+ * SPH & OTG devices
+ */
+ info = kzalloc(sizeof(struct oxu_info), GFP_KERNEL);
+ if (!info) {
+ dev_dbg(&pdev->dev, "error allocating memory\n");
+ ret = -EFAULT;
+ goto error_alloc;
+ }
+ platform_set_drvdata(pdev, info);
+
+ ret = oxu_init(pdev, memstart, memlen, base, irq);
+ if (ret < 0) {
+ dev_dbg(&pdev->dev, "cannot init USB devices\n");
+ goto error_init;
+ }
+
+ dev_info(&pdev->dev, "devices enabled and running\n");
+ platform_set_drvdata(pdev, info);
+
+ return 0;
+
+error_init:
+ kfree(info);
+ platform_set_drvdata(pdev, NULL);
+
+error_alloc:
+ iounmap(base);
+
+error_set_irq_type:
+error_ioremap:
+ release_mem_region(memstart, memlen);
+
+ dev_err(&pdev->dev, "init %s fail, %d\n", dev_name(&pdev->dev), ret);
+ return ret;
+}
+
+static void oxu_remove(struct platform_device *pdev, struct usb_hcd *hcd)
+{
+ usb_remove_hcd(hcd);
+ usb_put_hcd(hcd);
+}
+
+static int oxu_drv_remove(struct platform_device *pdev)
+{
+ struct oxu_info *info = platform_get_drvdata(pdev);
+ unsigned long memstart = info->hcd[0]->rsrc_start,
+ memlen = info->hcd[0]->rsrc_len;
+ void *base = info->hcd[0]->regs;
+
+ oxu_remove(pdev, info->hcd[0]);
+ oxu_remove(pdev, info->hcd[1]);
+
+ iounmap(base);
+ release_mem_region(memstart, memlen);
+
+ kfree(info);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static void oxu_drv_shutdown(struct platform_device *pdev)
+{
+ oxu_drv_remove(pdev);
+}
+
+#if 0
+/* FIXME: TODO */
+static int oxu_drv_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+
+ return 0;
+}
+
+static int oxu_drv_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+
+ return 0;
+}
+#else
+#define oxu_drv_suspend NULL
+#define oxu_drv_resume NULL
+#endif
+
+static struct platform_driver oxu_driver = {
+ .probe = oxu_drv_probe,
+ .remove = oxu_drv_remove,
+ .shutdown = oxu_drv_shutdown,
+ .suspend = oxu_drv_suspend,
+ .resume = oxu_drv_resume,
+ .driver = {
+ .name = "oxu210hp-hcd",
+ .bus = &platform_bus_type
+ }
+};
+
+static int __init oxu_module_init(void)
+{
+ int retval = 0;
+
+ retval = platform_driver_register(&oxu_driver);
+ if (retval < 0)
+ return retval;
+
+ return retval;
+}
+
+static void __exit oxu_module_cleanup(void)
+{
+ platform_driver_unregister(&oxu_driver);
+}
+
+module_init(oxu_module_init);
+module_exit(oxu_module_cleanup);
+
+MODULE_DESCRIPTION("Oxford OXU210HP HCD driver - ver. " DRIVER_VERSION);
+MODULE_AUTHOR("Rodolfo Giometti ");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/host/oxu210hp.h b/drivers/usb/host/oxu210hp.h
new file mode 100644
index 00000000000..8910e271cc7
--- /dev/null
+++ b/drivers/usb/host/oxu210hp.h
@@ -0,0 +1,447 @@
+/*
+ * Host interface registers
+ */
+
+#define OXU_DEVICEID 0x00
+ #define OXU_REV_MASK 0xffff0000
+ #define OXU_REV_SHIFT 16
+ #define OXU_REV_2100 0x2100
+ #define OXU_BO_SHIFT 8
+ #define OXU_BO_MASK (0x3 << OXU_BO_SHIFT)
+ #define OXU_MAJ_REV_SHIFT 4
+ #define OXU_MAJ_REV_MASK (0xf << OXU_MAJ_REV_SHIFT)
+ #define OXU_MIN_REV_SHIFT 0
+ #define OXU_MIN_REV_MASK (0xf << OXU_MIN_REV_SHIFT)
+#define OXU_HOSTIFCONFIG 0x04
+#define OXU_SOFTRESET 0x08
+ #define OXU_SRESET (1 << 0)
+
+#define OXU_PIOBURSTREADCTRL 0x0C
+
+#define OXU_CHIPIRQSTATUS 0x10
+#define OXU_CHIPIRQEN_SET 0x14
+#define OXU_CHIPIRQEN_CLR 0x18
+ #define OXU_USBSPHLPWUI 0x00000080
+ #define OXU_USBOTGLPWUI 0x00000040
+ #define OXU_USBSPHI 0x00000002
+ #define OXU_USBOTGI 0x00000001
+
+#define OXU_CLKCTRL_SET 0x1C
+ #define OXU_SYSCLKEN 0x00000008
+ #define OXU_USBSPHCLKEN 0x00000002
+ #define OXU_USBOTGCLKEN 0x00000001
+
+#define OXU_ASO 0x68
+ #define OXU_SPHPOEN 0x00000100
+ #define OXU_OVRCCURPUPDEN 0x00000800
+ #define OXU_ASO_OP (1 << 10)
+ #define OXU_COMPARATOR 0x000004000
+
+#define OXU_USBMODE 0x1A8
+ #define OXU_VBPS 0x00000020
+ #define OXU_ES_LITTLE 0x00000000
+ #define OXU_CM_HOST_ONLY 0x00000003
+
+/*
+ * Proper EHCI structs & defines
+ */
+
+/* Magic numbers that can affect system performance */
+#define EHCI_TUNE_CERR 3 /* 0-3 qtd retries; 0 == don't stop */
+#define EHCI_TUNE_RL_HS 4 /* nak throttle; see 4.9 */
+#define EHCI_TUNE_RL_TT 0
+#define EHCI_TUNE_MULT_HS 1 /* 1-3 transactions/uframe; 4.10.3 */
+#define EHCI_TUNE_MULT_TT 1
+#define EHCI_TUNE_FLS 2 /* (small) 256 frame schedule */
+
+struct oxu_hcd;
+
+/* EHCI register interface, corresponds to EHCI Revision 0.95 specification */
+
+/* Section 2.2 Host Controller Capability Registers */
+struct ehci_caps {
+ /* these fields are specified as 8 and 16 bit registers,
+ * but some hosts can't perform 8 or 16 bit PCI accesses.
+ */
+ u32 hc_capbase;
+#define HC_LENGTH(p) (((p)>>00)&0x00ff) /* bits 7:0 */
+#define HC_VERSION(p) (((p)>>16)&0xffff) /* bits 31:16 */
+ u32 hcs_params; /* HCSPARAMS - offset 0x4 */
+#define HCS_DEBUG_PORT(p) (((p)>>20)&0xf) /* bits 23:20, debug port? */
+#define HCS_INDICATOR(p) ((p)&(1 << 16)) /* true: has port indicators */
+#define HCS_N_CC(p) (((p)>>12)&0xf) /* bits 15:12, #companion HCs */
+#define HCS_N_PCC(p) (((p)>>8)&0xf) /* bits 11:8, ports per CC */
+#define HCS_PORTROUTED(p) ((p)&(1 << 7)) /* true: port routing */
+#define HCS_PPC(p) ((p)&(1 << 4)) /* true: port power control */
+#define HCS_N_PORTS(p) (((p)>>0)&0xf) /* bits 3:0, ports on HC */
+
+ u32 hcc_params; /* HCCPARAMS - offset 0x8 */
+#define HCC_EXT_CAPS(p) (((p)>>8)&0xff) /* for pci extended caps */
+#define HCC_ISOC_CACHE(p) ((p)&(1 << 7)) /* true: can cache isoc frame */
+#define HCC_ISOC_THRES(p) (((p)>>4)&0x7) /* bits 6:4, uframes cached */
+#define HCC_CANPARK(p) ((p)&(1 << 2)) /* true: can park on async qh */
+#define HCC_PGM_FRAMELISTLEN(p) ((p)&(1 << 1)) /* true: periodic_size changes*/
+#define HCC_64BIT_ADDR(p) ((p)&(1)) /* true: can use 64-bit addr */
+ u8 portroute[8]; /* nibbles for routing - offset 0xC */
+} __attribute__ ((packed));
+
+
+/* Section 2.3 Host Controller Operational Registers */
+struct ehci_regs {
+ /* USBCMD: offset 0x00 */
+ u32 command;
+/* 23:16 is r/w intr rate, in microframes; default "8" == 1/msec */
+#define CMD_PARK (1<<11) /* enable "park" on async qh */
+#define CMD_PARK_CNT(c) (((c)>>8)&3) /* how many transfers to park for */
+#define CMD_LRESET (1<<7) /* partial reset (no ports, etc) */
+#define CMD_IAAD (1<<6) /* "doorbell" interrupt async advance */
+#define CMD_ASE (1<<5) /* async schedule enable */
+#define CMD_PSE (1<<4) /* periodic schedule enable */
+/* 3:2 is periodic frame list size */
+#define CMD_RESET (1<<1) /* reset HC not bus */
+#define CMD_RUN (1<<0) /* start/stop HC */
+
+ /* USBSTS: offset 0x04 */
+ u32 status;
+#define STS_ASS (1<<15) /* Async Schedule Status */
+#define STS_PSS (1<<14) /* Periodic Schedule Status */
+#define STS_RECL (1<<13) /* Reclamation */
+#define STS_HALT (1<<12) /* Not running (any reason) */
+/* some bits reserved */
+ /* these STS_* flags are also intr_enable bits (USBINTR) */
+#define STS_IAA (1<<5) /* Interrupted on async advance */
+#define STS_FATAL (1<<4) /* such as some PCI access errors */
+#define STS_FLR (1<<3) /* frame list rolled over */
+#define STS_PCD (1<<2) /* port change detect */
+#define STS_ERR (1<<1) /* "error" completion (overflow, ...) */
+#define STS_INT (1<<0) /* "normal" completion (short, ...) */
+
+#define INTR_MASK (STS_IAA | STS_FATAL | STS_PCD | STS_ERR | STS_INT)
+
+ /* USBINTR: offset 0x08 */
+ u32 intr_enable;
+
+ /* FRINDEX: offset 0x0C */
+ u32 frame_index; /* current microframe number */
+ /* CTRLDSSEGMENT: offset 0x10 */
+ u32 segment; /* address bits 63:32 if needed */
+ /* PERIODICLISTBASE: offset 0x14 */
+ u32 frame_list; /* points to periodic list */
+ /* ASYNCLISTADDR: offset 0x18 */
+ u32 async_next; /* address of next async queue head */
+
+ u32 reserved[9];
+
+ /* CONFIGFLAG: offset 0x40 */
+ u32 configured_flag;
+#define FLAG_CF (1<<0) /* true: we'll support "high speed" */
+
+ /* PORTSC: offset 0x44 */
+ u32 port_status[0]; /* up to N_PORTS */
+/* 31:23 reserved */
+#define PORT_WKOC_E (1<<22) /* wake on overcurrent (enable) */
+#define PORT_WKDISC_E (1<<21) /* wake on disconnect (enable) */
+#define PORT_WKCONN_E (1<<20) /* wake on connect (enable) */
+/* 19:16 for port testing */
+#define PORT_LED_OFF (0<<14)
+#define PORT_LED_AMBER (1<<14)
+#define PORT_LED_GREEN (2<<14)
+#define PORT_LED_MASK (3<<14)
+#define PORT_OWNER (1<<13) /* true: companion hc owns this port */
+#define PORT_POWER (1<<12) /* true: has power (see PPC) */
+#define PORT_USB11(x) (((x)&(3<<10)) == (1<<10)) /* USB 1.1 device */
+/* 11:10 for detecting lowspeed devices (reset vs release ownership) */
+/* 9 reserved */
+#define PORT_RESET (1<<8) /* reset port */
+#define PORT_SUSPEND (1<<7) /* suspend port */
+#define PORT_RESUME (1<<6) /* resume it */
+#define PORT_OCC (1<<5) /* over current change */
+#define PORT_OC (1<<4) /* over current active */
+#define PORT_PEC (1<<3) /* port enable change */
+#define PORT_PE (1<<2) /* port enable */
+#define PORT_CSC (1<<1) /* connect status change */
+#define PORT_CONNECT (1<<0) /* device connected */
+#define PORT_RWC_BITS (PORT_CSC | PORT_PEC | PORT_OCC)
+} __attribute__ ((packed));
+
+/* Appendix C, Debug port ... intended for use with special "debug devices"
+ * that can help if there's no serial console. (nonstandard enumeration.)
+ */
+struct ehci_dbg_port {
+ u32 control;
+#define DBGP_OWNER (1<<30)
+#define DBGP_ENABLED (1<<28)
+#define DBGP_DONE (1<<16)
+#define DBGP_INUSE (1<<10)
+#define DBGP_ERRCODE(x) (((x)>>7)&0x07)
+# define DBGP_ERR_BAD 1
+# define DBGP_ERR_SIGNAL 2
+#define DBGP_ERROR (1<<6)
+#define DBGP_GO (1<<5)
+#define DBGP_OUT (1<<4)
+#define DBGP_LEN(x) (((x)>>0)&0x0f)
+ u32 pids;
+#define DBGP_PID_GET(x) (((x)>>16)&0xff)
+#define DBGP_PID_SET(data, tok) (((data)<<8)|(tok))
+ u32 data03;
+ u32 data47;
+ u32 address;
+#define DBGP_EPADDR(dev, ep) (((dev)<<8)|(ep))
+} __attribute__ ((packed));
+
+
+#define QTD_NEXT(dma) cpu_to_le32((u32)dma)
+
+/*
+ * EHCI Specification 0.95 Section 3.5
+ * QTD: describe data transfer components (buffer, direction, ...)
+ * See Fig 3-6 "Queue Element Transfer Descriptor Block Diagram".
+ *
+ * These are associated only with "QH" (Queue Head) structures,
+ * used with control, bulk, and interrupt transfers.
+ */
+struct ehci_qtd {
+ /* first part defined by EHCI spec */
+ __le32 hw_next; /* see EHCI 3.5.1 */
+ __le32 hw_alt_next; /* see EHCI 3.5.2 */
+ __le32 hw_token; /* see EHCI 3.5.3 */
+#define QTD_TOGGLE (1 << 31) /* data toggle */
+#define QTD_LENGTH(tok) (((tok)>>16) & 0x7fff)
+#define QTD_IOC (1 << 15) /* interrupt on complete */
+#define QTD_CERR(tok) (((tok)>>10) & 0x3)
+#define QTD_PID(tok) (((tok)>>8) & 0x3)
+#define QTD_STS_ACTIVE (1 << 7) /* HC may execute this */
+#define QTD_STS_HALT (1 << 6) /* halted on error */
+#define QTD_STS_DBE (1 << 5) /* data buffer error (in HC) */
+#define QTD_STS_BABBLE (1 << 4) /* device was babbling (qtd halted) */
+#define QTD_STS_XACT (1 << 3) /* device gave illegal response */
+#define QTD_STS_MMF (1 << 2) /* incomplete split transaction */
+#define QTD_STS_STS (1 << 1) /* split transaction state */
+#define QTD_STS_PING (1 << 0) /* issue PING? */
+ __le32 hw_buf[5]; /* see EHCI 3.5.4 */
+ __le32 hw_buf_hi[5]; /* Appendix B */
+
+ /* the rest is HCD-private */
+ dma_addr_t qtd_dma; /* qtd address */
+ struct list_head qtd_list; /* sw qtd list */
+ struct urb *urb; /* qtd's urb */
+ size_t length; /* length of buffer */
+
+ u32 qtd_buffer_len;
+ void *buffer;
+ dma_addr_t buffer_dma;
+ void *transfer_buffer;
+ void *transfer_dma;
+} __attribute__ ((aligned(32)));
+
+/* mask NakCnt+T in qh->hw_alt_next */
+#define QTD_MASK __constant_cpu_to_le32 (~0x1f)
+
+#define IS_SHORT_READ(token) (QTD_LENGTH(token) != 0 && QTD_PID(token) == 1)
+
+/* Type tag from {qh, itd, sitd, fstn}->hw_next */
+#define Q_NEXT_TYPE(dma) ((dma) & __constant_cpu_to_le32 (3 << 1))
+
+/* values for that type tag */
+#define Q_TYPE_QH __constant_cpu_to_le32 (1 << 1)
+
+/* next async queue entry, or pointer to interrupt/periodic QH */
+#define QH_NEXT(dma) (cpu_to_le32(((u32)dma)&~0x01f)|Q_TYPE_QH)
+
+/* for periodic/async schedules and qtd lists, mark end of list */
+#define EHCI_LIST_END __constant_cpu_to_le32(1) /* "null pointer" to hw */
+
+/*
+ * Entries in periodic shadow table are pointers to one of four kinds
+ * of data structure. That's dictated by the hardware; a type tag is
+ * encoded in the low bits of the hardware's periodic schedule. Use
+ * Q_NEXT_TYPE to get the tag.
+ *
+ * For entries in the async schedule, the type tag always says "qh".
+ */
+union ehci_shadow {
+ struct ehci_qh *qh; /* Q_TYPE_QH */
+ __le32 *hw_next; /* (all types) */
+ void *ptr;
+};
+
+/*
+ * EHCI Specification 0.95 Section 3.6
+ * QH: describes control/bulk/interrupt endpoints
+ * See Fig 3-7 "Queue Head Structure Layout".
+ *
+ * These appear in both the async and (for interrupt) periodic schedules.
+ */
+
+struct ehci_qh {
+ /* first part defined by EHCI spec */
+ __le32 hw_next; /* see EHCI 3.6.1 */
+ __le32 hw_info1; /* see EHCI 3.6.2 */
+#define QH_HEAD 0x00008000
+ __le32 hw_info2; /* see EHCI 3.6.2 */
+#define QH_SMASK 0x000000ff
+#define QH_CMASK 0x0000ff00
+#define QH_HUBADDR 0x007f0000
+#define QH_HUBPORT 0x3f800000
+#define QH_MULT 0xc0000000
+ __le32 hw_current; /* qtd list - see EHCI 3.6.4 */
+
+ /* qtd overlay (hardware parts of a struct ehci_qtd) */
+ __le32 hw_qtd_next;
+ __le32 hw_alt_next;
+ __le32 hw_token;
+ __le32 hw_buf[5];
+ __le32 hw_buf_hi[5];
+
+ /* the rest is HCD-private */
+ dma_addr_t qh_dma; /* address of qh */
+ union ehci_shadow qh_next; /* ptr to qh; or periodic */
+ struct list_head qtd_list; /* sw qtd list */
+ struct ehci_qtd *dummy;
+ struct ehci_qh *reclaim; /* next to reclaim */
+
+ struct oxu_hcd *oxu;
+ struct kref kref;
+ unsigned stamp;
+
+ u8 qh_state;
+#define QH_STATE_LINKED 1 /* HC sees this */
+#define QH_STATE_UNLINK 2 /* HC may still see this */
+#define QH_STATE_IDLE 3 /* HC doesn't see this */
+#define QH_STATE_UNLINK_WAIT 4 /* LINKED and on reclaim q */
+#define QH_STATE_COMPLETING 5 /* don't touch token.HALT */
+
+ /* periodic schedule info */
+ u8 usecs; /* intr bandwidth */
+ u8 gap_uf; /* uframes split/csplit gap */
+ u8 c_usecs; /* ... split completion bw */
+ u16 tt_usecs; /* tt downstream bandwidth */
+ unsigned short period; /* polling interval */
+ unsigned short start; /* where polling starts */
+#define NO_FRAME ((unsigned short)~0) /* pick new start */
+ struct usb_device *dev; /* access to TT */
+} __attribute__ ((aligned(32)));
+
+/*
+ * Proper OXU210HP structs
+ */
+
+#define OXU_OTG_CORE_OFFSET 0x00400
+#define OXU_OTG_CAP_OFFSET (OXU_OTG_CORE_OFFSET + 0x100)
+#define OXU_SPH_CORE_OFFSET 0x00800
+#define OXU_SPH_CAP_OFFSET (OXU_SPH_CORE_OFFSET + 0x100)
+
+#define OXU_OTG_MEM 0xE000
+#define OXU_SPH_MEM 0x16000
+
+/* Only the number of elements and the element structure are specified here. */
+/* 2 host controllers are enabled - total size <= 28 kbytes */
+#define DEFAULT_I_TDPS 1024
+#define QHEAD_NUM 16
+#define QTD_NUM 32
+#define SITD_NUM 8
+#define MURB_NUM 8
+
+#define BUFFER_NUM 8
+#define BUFFER_SIZE 512
+
+struct oxu_info {
+ struct usb_hcd *hcd[2];
+};
+
+struct oxu_buf {
+ u8 buffer[BUFFER_SIZE];
+} __attribute__ ((aligned(BUFFER_SIZE)));
+
+struct oxu_onchip_mem {
+ struct oxu_buf db_pool[BUFFER_NUM];
+
+ u32 frame_list[DEFAULT_I_TDPS];
+ struct ehci_qh qh_pool[QHEAD_NUM];
+ struct ehci_qtd qtd_pool[QTD_NUM];
+} __attribute__ ((aligned(4 << 10)));
+
+#define EHCI_MAX_ROOT_PORTS 15 /* see HCS_N_PORTS */
+
+struct oxu_murb {
+ struct urb urb;
+ struct urb *main;
+ u8 last;
+};
+
+struct oxu_hcd { /* one per controller */
+ unsigned int is_otg:1;
+
+ u8 qh_used[QHEAD_NUM];
+ u8 qtd_used[QTD_NUM];
+ u8 db_used[BUFFER_NUM];
+ u8 murb_used[MURB_NUM];
+
+ struct oxu_onchip_mem __iomem *mem;
+ spinlock_t mem_lock;
+
+ struct timer_list urb_timer;
+
+ struct ehci_caps __iomem *caps;
+ struct ehci_regs __iomem *regs;
+
+ __u32 hcs_params; /* cached register copy */
+ spinlock_t lock;
+
+ /* async schedule support */
+ struct ehci_qh *async;
+ struct ehci_qh *reclaim;
+ unsigned reclaim_ready:1;
+ unsigned scanning:1;
+
+ /* periodic schedule support */
+ unsigned periodic_size;
+ __le32 *periodic; /* hw periodic table */
+ dma_addr_t periodic_dma;
+ unsigned i_thresh; /* uframes HC might cache */
+
+ union ehci_shadow *pshadow; /* mirror hw periodic table */
+ int next_uframe; /* scan periodic, start here */
+ unsigned periodic_sched; /* periodic activity count */
+
+ /* per root hub port */
+ unsigned long reset_done[EHCI_MAX_ROOT_PORTS];
+ /* bit vectors (one bit per port) */
+ unsigned long bus_suspended; /* which ports were
+ * already suspended at the
+ * start of a bus suspend
+ */
+ unsigned long companion_ports;/* which ports are dedicated
+ * to the companion controller
+ */
+
+ struct timer_list watchdog;
+ unsigned long actions;
+ unsigned stamp;
+ unsigned long next_statechange;
+ u32 command;
+
+ /* SILICON QUIRKS */
+ struct list_head urb_list; /* head of the queue of urbs
+ * that didn't get enough
+ * resources
+ */
+ struct oxu_murb *murb_pool; /* murb per split big urb */
+ unsigned urb_len;
+
+ u8 sbrn; /* packed release number */
+};
+
+#define EHCI_IAA_JIFFIES (HZ/100) /* arbitrary; ~10 msec */
+#define EHCI_IO_JIFFIES (HZ/10) /* io watchdog > irq_thresh */
+#define EHCI_ASYNC_JIFFIES (HZ/20) /* async idle timeout */
+#define EHCI_SHRINK_JIFFIES (HZ/200) /* async qh unlink delay */
+
+enum ehci_timer_action {
+ TIMER_IO_WATCHDOG,
+ TIMER_IAA_WATCHDOG,
+ TIMER_ASYNC_SHRINK,
+ TIMER_ASYNC_OFF,
+};
+
+#include
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index ae6e70edd74..75b69847918 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -172,9 +172,9 @@ static void __devinit quirk_usb_handoff_ohci(struct pci_dev *pdev)
if (!mmio_resource_enabled(pdev, 0))
return;
- base = ioremap_nocache(pci_resource_start(pdev, 0),
- pci_resource_len(pdev, 0));
- if (base == NULL) return;
+ base = pci_ioremap_bar(pdev, 0);
+ if (base == NULL)
+ return;
/* On PA-RISC, PDC can leave IR set incorrectly; ignore it there. */
#ifndef __hppa__
@@ -221,9 +221,9 @@ static void __devinit quirk_usb_disable_ehci(struct pci_dev *pdev)
if (!mmio_resource_enabled(pdev, 0))
return;
- base = ioremap_nocache(pci_resource_start(pdev, 0),
- pci_resource_len(pdev, 0));
- if (base == NULL) return;
+ base = pci_ioremap_bar(pdev, 0);
+ if (base == NULL)
+ return;
cap_length = readb(base);
op_reg_base = base + cap_length;
@@ -271,7 +271,7 @@ static void __devinit quirk_usb_disable_ehci(struct pci_dev *pdev)
/* if boot firmware now owns EHCI, spin till
* it hands it over.
*/
- msec = 5000;
+ msec = 1000;
while ((cap & EHCI_USBLEGSUP_BIOS) && (msec > 0)) {
tried_handoff = 1;
msleep(10);
diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
index c21f14e0666..319041205b5 100644
--- a/drivers/usb/host/r8a66597-hcd.c
+++ b/drivers/usb/host/r8a66597-hcd.c
@@ -2275,7 +2275,6 @@ static int __init_or_module r8a66597_remove(struct platform_device *pdev)
return 0;
}
-#define resource_len(r) (((r)->end - (r)->start) + 1)
static int __init r8a66597_probe(struct platform_device *pdev)
{
#if defined(CONFIG_SUPERH_ON_CHIP_R8A66597) && defined(CONFIG_HAVE_CLK)
@@ -2296,11 +2295,10 @@ static int __init r8a66597_probe(struct platform_device *pdev)
goto clean_up;
}
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- (char *)hcd_name);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
ret = -ENODEV;
- dev_err(&pdev->dev, "platform_get_resource_byname error.\n");
+ dev_err(&pdev->dev, "platform_get_resource error.\n");
goto clean_up;
}
@@ -2315,7 +2313,7 @@ static int __init r8a66597_probe(struct platform_device *pdev)
irq = ires->start;
irq_trigger = ires->flags & IRQF_TRIGGER_MASK;
- reg = ioremap(res->start, resource_len(res));
+ reg = ioremap(res->start, resource_size(res));
if (reg == NULL) {
ret = -ENOMEM;
dev_err(&pdev->dev, "ioremap error.\n");
diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c
index cf5e4cf7ea4..4e221060f58 100644
--- a/drivers/usb/host/uhci-hcd.c
+++ b/drivers/usb/host/uhci-hcd.c
@@ -942,6 +942,8 @@ static struct pci_driver uhci_pci_driver = {
#ifdef CONFIG_PM
.suspend = usb_hcd_pci_suspend,
+ .suspend_late = usb_hcd_pci_suspend_late,
+ .resume_early = usb_hcd_pci_resume_early,
.resume = usb_hcd_pci_resume,
#endif /* PM */
};
diff --git a/drivers/usb/image/microtek.c b/drivers/usb/image/microtek.c
index 885867a86de..4541dfcea88 100644
--- a/drivers/usb/image/microtek.c
+++ b/drivers/usb/image/microtek.c
@@ -350,17 +350,16 @@ static int mts_scsi_abort(struct scsi_cmnd *srb)
static int mts_scsi_host_reset(struct scsi_cmnd *srb)
{
struct mts_desc* desc = (struct mts_desc*)(srb->device->host->hostdata[0]);
- int result, rc;
+ int result;
MTS_DEBUG_GOT_HERE();
mts_debug_dump(desc);
- rc = usb_lock_device_for_reset(desc->usb_dev, desc->usb_intf);
- if (rc < 0)
- return FAILED;
- result = usb_reset_device(desc->usb_dev);
- if (rc)
+ result = usb_lock_device_for_reset(desc->usb_dev, desc->usb_intf);
+ if (result == 0) {
+ result = usb_reset_device(desc->usb_dev);
usb_unlock_device(desc->usb_dev);
+ }
return result ? FAILED : SUCCESS;
}
diff --git a/drivers/usb/misc/berry_charge.c b/drivers/usb/misc/berry_charge.c
index 24e2dc3148a..c05a85bc592 100644
--- a/drivers/usb/misc/berry_charge.c
+++ b/drivers/usb/misc/berry_charge.c
@@ -123,6 +123,11 @@ static int berry_probe(struct usb_interface *intf,
{
struct usb_device *udev = interface_to_usbdev(intf);
+ if (udev->bus_mA < 500) {
+ dbg(&udev->dev, "Not enough bus power available to charge\n");
+ return -ENODEV;
+ }
+
dbg(&udev->dev, "Power is set to %dmA\n",
udev->actconfig->desc.bMaxPower * 2);
diff --git a/drivers/usb/misc/emi26.c b/drivers/usb/misc/emi26.c
index e762beb5f3c..879a980ca8c 100644
--- a/drivers/usb/misc/emi26.c
+++ b/drivers/usb/misc/emi26.c
@@ -160,7 +160,7 @@ static int emi26_load_firmware (struct usb_device *dev)
err("%s - error loading firmware: error = %d", __func__, err);
goto wraperr;
}
- } while (i > 0);
+ } while (rec);
/* Assert reset (stop the CPU in the EMI) */
err = emi26_set_reset(dev,1);
diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
index 444c69c447b..5f1a19d1497 100644
--- a/drivers/usb/misc/usbtest.c
+++ b/drivers/usb/misc/usbtest.c
@@ -192,8 +192,6 @@ static struct urb *simple_alloc_urb (
{
struct urb *urb;
- if (bytes < 0)
- return NULL;
urb = usb_alloc_urb (0, GFP_KERNEL);
if (!urb)
return urb;
diff --git a/drivers/usb/mon/Kconfig b/drivers/usb/mon/Kconfig
index deb9ddffa40..f28f350cd96 100644
--- a/drivers/usb/mon/Kconfig
+++ b/drivers/usb/mon/Kconfig
@@ -3,14 +3,13 @@
#
config USB_MON
- bool "USB Monitor"
- depends on USB!=n
- default y
+ tristate "USB Monitor"
+ depends on USB
+ default y if USB=y
+ default m if USB=m
help
- If you say Y here, a component which captures the USB traffic
+ If you select this option, a component which captures the USB traffic
between peripheral-specific drivers and HC drivers will be built.
For more information, see .
- This is somewhat experimental at this time, but it should be safe.
-
- If unsure, say Y.
+ If unsure, say Y (if allowed), otherwise M.
diff --git a/drivers/usb/mon/Makefile b/drivers/usb/mon/Makefile
index 0f76ed5e161..c6516b56673 100644
--- a/drivers/usb/mon/Makefile
+++ b/drivers/usb/mon/Makefile
@@ -4,5 +4,4 @@
usbmon-objs := mon_main.o mon_stat.o mon_text.o mon_bin.o mon_dma.o
-# This does not use CONFIG_USB_MON because we want this to use a tristate.
-obj-$(CONFIG_USB) += usbmon.o
+obj-$(CONFIG_USB_MON) += usbmon.o
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index 4b9542bbb35..5af7379cd9a 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -11,7 +11,7 @@ config USB_MUSB_HDRC
depends on (USB || USB_GADGET) && HAVE_CLK
depends on !SUPERH
select TWL4030_USB if MACH_OMAP_3430SDP
- tristate 'Inventra Highspeed Dual Role Controller (TI, ...)'
+ tristate 'Inventra Highspeed Dual Role Controller (TI, ADI, ...)'
help
Say Y here if your system has a dual role high speed USB
controller based on the Mentor Graphics silicon IP. Then
@@ -22,6 +22,9 @@ config USB_MUSB_HDRC
Texas Instruments parts using this IP include DaVinci 644x,
OMAP 243x, OMAP 343x, and TUSB 6010.
+ Analog Devices parts using this IP include Blackfin BF54x,
+ BF525 and BF527.
+
If you do not know what this is, please say N.
To compile this driver as a module, choose M here; the
@@ -33,6 +36,8 @@ config USB_MUSB_SOC
default y if ARCH_DAVINCI
default y if ARCH_OMAP2430
default y if ARCH_OMAP34XX
+ default y if (BF54x && !BF544)
+ default y if (BF52x && !BF522 && !BF523)
comment "DaVinci 644x USB support"
depends on USB_MUSB_HDRC && ARCH_DAVINCI
@@ -43,6 +48,9 @@ comment "OMAP 243x high speed USB support"
comment "OMAP 343x high speed USB support"
depends on USB_MUSB_HDRC && ARCH_OMAP34XX
+comment "Blackfin high speed USB Support"
+ depends on USB_MUSB_HDRC && ((BF54x && !BF544) || (BF52x && !BF522 && !BF523))
+
config USB_TUSB6010
boolean "TUSB 6010 support"
depends on USB_MUSB_HDRC && !USB_MUSB_SOC
@@ -142,7 +150,7 @@ config MUSB_PIO_ONLY
config USB_INVENTRA_DMA
bool
depends on USB_MUSB_HDRC && !MUSB_PIO_ONLY
- default ARCH_OMAP2430 || ARCH_OMAP34XX
+ default ARCH_OMAP2430 || ARCH_OMAP34XX || BLACKFIN
help
Enable DMA transfers using Mentor's engine.
diff --git a/drivers/usb/musb/Makefile b/drivers/usb/musb/Makefile
index b6af0d687a7..85710ccc188 100644
--- a/drivers/usb/musb/Makefile
+++ b/drivers/usb/musb/Makefile
@@ -22,6 +22,14 @@ ifeq ($(CONFIG_ARCH_OMAP3430),y)
musb_hdrc-objs += omap2430.o
endif
+ifeq ($(CONFIG_BF54x),y)
+ musb_hdrc-objs += blackfin.o
+endif
+
+ifeq ($(CONFIG_BF52x),y)
+ musb_hdrc-objs += blackfin.o
+endif
+
ifeq ($(CONFIG_USB_GADGET_MUSB_HDRC),y)
musb_hdrc-objs += musb_gadget_ep0.o musb_gadget.o
endif
diff --git a/drivers/usb/musb/blackfin.c b/drivers/usb/musb/blackfin.c
new file mode 100644
index 00000000000..78613485209
--- /dev/null
+++ b/drivers/usb/musb/blackfin.c
@@ -0,0 +1,320 @@
+/*
+ * MUSB OTG controller driver for Blackfin Processors
+ *
+ * Copyright 2006-2008 Analog Devices Inc.
+ *
+ * Enter bugs at http://blackfin.uclinux.org/
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+
+#include "musb_core.h"
+#include "blackfin.h"
+
+/*
+ * Load an endpoint's FIFO
+ */
+void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *src)
+{
+ void __iomem *fifo = hw_ep->fifo;
+ void __iomem *epio = hw_ep->regs;
+
+ prefetch((u8 *)src);
+
+ musb_writew(epio, MUSB_TXCOUNT, len);
+
+ DBG(4, "TX ep%d fifo %p count %d buf %p, epio %p\n",
+ hw_ep->epnum, fifo, len, src, epio);
+
+ dump_fifo_data(src, len);
+
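+ /* an odd source address can't be accessed 16 bits at a time, so use outsw_8 */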
+ if (unlikely((unsigned long)src & 0x01))
+ outsw_8((unsigned long)fifo, src,
+ len & 0x01 ? (len >> 1) + 1 : len >> 1);
+ else
+ outsw((unsigned long)fifo, src,
+ len & 0x01 ? (len >> 1) + 1 : len >> 1);
+}
+
+/*
+ * Unload an endpoint's FIFO
+ */
+void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
+{
+ void __iomem *fifo = hw_ep->fifo;
+ u8 epnum = hw_ep->epnum;
+ u16 dma_reg = 0;
+
+ DBG(4, "%cX ep%d fifo %p count %d buf %p\n",
+ 'R', hw_ep->epnum, fifo, len, dst);
+
+#ifdef CONFIG_BF52x
+ invalidate_dcache_range((unsigned int)dst,
+ (unsigned int)(dst + len));
+
+ /* Setup DMA address register */
+ dma_reg = (u16) ((u32) dst & 0xFFFF);
+ bfin_write16(USB_DMA_REG(epnum, USB_DMAx_ADDR_LOW), dma_reg);
+ SSYNC();
+
+ dma_reg = (u16) (((u32) dst >> 16) & 0xFFFF);
+ bfin_write16(USB_DMA_REG(epnum, USB_DMAx_ADDR_HIGH), dma_reg);
+ SSYNC();
+
+ /* Setup DMA count register */
+ bfin_write16(USB_DMA_REG(epnum, USB_DMAx_COUNT_LOW), len);
+ bfin_write16(USB_DMA_REG(epnum, USB_DMAx_COUNT_HIGH), 0);
+ SSYNC();
+
+ /* Enable the DMA */
+ dma_reg = (epnum << 4) | DMA_ENA | INT_ENA;
+ bfin_write16(USB_DMA_REG(epnum, USB_DMAx_CTRL), dma_reg);
+ SSYNC();
+
+ /* Wait for the DMA transfer to complete */
+ while (!(bfin_read_USB_DMA_INTERRUPT() & (1 << epnum)))
+ cpu_relax();
+
+ /* acknowledge dma interrupt */
+ bfin_write_USB_DMA_INTERRUPT(1 << epnum);
+ SSYNC();
+
+ /* Reset DMA */
+ bfin_write16(USB_DMA_REG(epnum, USB_DMAx_CTRL), 0);
+ SSYNC();
+#else
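+ /* PIO fallback: odd destination addresses use insw_8, aligned ones 16-bit insw */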
+ if (unlikely((unsigned long)dst & 0x01))
+ insw_8((unsigned long)fifo, dst,
+ len & 0x01 ? (len >> 1) + 1 : len >> 1);
+ else
+ insw((unsigned long)fifo, dst,
+ len & 0x01 ? (len >> 1) + 1 : len >> 1);
+#endif
+
+ dump_fifo_data(dst, len);
+}
+
+static irqreturn_t blackfin_interrupt(int irq, void *__hci)
+{
+ unsigned long flags;
+ irqreturn_t retval = IRQ_NONE;
+ struct musb *musb = __hci;
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ musb->int_usb = musb_readb(musb->mregs, MUSB_INTRUSB);
+ musb->int_tx = musb_readw(musb->mregs, MUSB_INTRTX);
+ musb->int_rx = musb_readw(musb->mregs, MUSB_INTRRX);
+
+ if (musb->int_usb || musb->int_tx || musb->int_rx) {
+ musb_writeb(musb->mregs, MUSB_INTRUSB, musb->int_usb);
+ musb_writew(musb->mregs, MUSB_INTRTX, musb->int_tx);
+ musb_writew(musb->mregs, MUSB_INTRRX, musb->int_rx);
+ retval = musb_interrupt(musb);
+ }
+
+ spin_unlock_irqrestore(&musb->lock, flags);
+
+ /* REVISIT we sometimes get spurious IRQs on g_ep0;
+ * not clear why... seen on BF54x too.
+ */
+ if (retval != IRQ_HANDLED)
+ DBG(5, "spurious?\n");
+
+ return IRQ_HANDLED;
+}
+
+static void musb_conn_timer_handler(unsigned long _musb)
+{
+ struct musb *musb = (void *)_musb;
+ unsigned long flags;
+ u16 val;
+
+ spin_lock_irqsave(&musb->lock, flags);
+ switch (musb->xceiv.state) {
+ case OTG_STATE_A_IDLE:
+ case OTG_STATE_A_WAIT_BCON:
+ /* Start a new session */
+ val = musb_readw(musb->mregs, MUSB_DEVCTL);
+ val |= MUSB_DEVCTL_SESSION;
+ musb_writew(musb->mregs, MUSB_DEVCTL, val);
+
+ val = musb_readw(musb->mregs, MUSB_DEVCTL);
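+ /* BDEVICE clear means the core is the A-device: drive VRSEL and wait for a B-connect */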
+ if (!(val & MUSB_DEVCTL_BDEVICE)) {
+ gpio_set_value(musb->config->gpio_vrsel, 1);
+ musb->xceiv.state = OTG_STATE_A_WAIT_BCON;
+ } else {
+ gpio_set_value(musb->config->gpio_vrsel, 0);
+
+ /* Ignore VBUSERROR and SUSPEND IRQ */
+ val = musb_readb(musb->mregs, MUSB_INTRUSBE);
+ val &= ~MUSB_INTR_VBUSERROR;
+ musb_writeb(musb->mregs, MUSB_INTRUSBE, val);
+
+ val = MUSB_INTR_SUSPEND | MUSB_INTR_VBUSERROR;
+ musb_writeb(musb->mregs, MUSB_INTRUSB, val);
+
+ val = MUSB_POWER_HSENAB;
+ musb_writeb(musb->mregs, MUSB_POWER, val);
+ }
+ mod_timer(&musb_conn_timer, jiffies + TIMER_DELAY);
+ break;
+
+ default:
+ DBG(1, "%s state not handled\n", otg_state_string(musb));
+ break;
+ }
+ spin_unlock_irqrestore(&musb->lock, flags);
+
+ DBG(4, "state is %s\n", otg_state_string(musb));
+}
+
+void musb_platform_enable(struct musb *musb)
+{
+ if (is_host_enabled(musb)) {
+ mod_timer(&musb_conn_timer, jiffies + TIMER_DELAY);
+ musb->a_wait_bcon = TIMER_DELAY;
+ }
+}
+
+void musb_platform_disable(struct musb *musb)
+{
+}
+
+static void bfin_vbus_power(struct musb *musb, int is_on, int sleeping)
+{
+}
+
+static void bfin_set_vbus(struct musb *musb, int is_on)
+{
+ if (is_on)
+ gpio_set_value(musb->config->gpio_vrsel, 1);
+ else
+ gpio_set_value(musb->config->gpio_vrsel, 0);
+
+ DBG(1, "VBUS %s, devctl %02x "
+ /* otg %3x conf %08x prcm %08x */ "\n",
+ otg_state_string(musb),
+ musb_readb(musb->mregs, MUSB_DEVCTL));
+}
+
+static int bfin_set_power(struct otg_transceiver *x, unsigned mA)
+{
+ return 0;
+}
+
+void musb_platform_try_idle(struct musb *musb, unsigned long timeout)
+{
+ if (is_host_enabled(musb))
+ mod_timer(&musb_conn_timer, jiffies + TIMER_DELAY);
+}
+
+int musb_platform_get_vbus_status(struct musb *musb)
+{
+ return 0;
+}
+
+void musb_platform_set_mode(struct musb *musb, u8 musb_mode)
+{
+}
+
+int __init musb_platform_init(struct musb *musb)
+{
+
+ /*
+ * Rev 1.0 BF549 EZ-KITs require PE7 to be high for both DEVICE
+ * and OTG HOST modes, while rev 1.1 and greater require PE7 to
+ * be low for DEVICE mode and high for HOST mode. We set it high
+ * here because we are in host mode
+ */
+
+ if (gpio_request(musb->config->gpio_vrsel, "USB_VRSEL")) {
+ printk(KERN_ERR "Failed ro request USB_VRSEL GPIO_%d \n",
+ musb->config->gpio_vrsel);
+ return -ENODEV;
+ }
+ gpio_direction_output(musb->config->gpio_vrsel, 0);
+
+ if (ANOMALY_05000346) {
+ bfin_write_USB_APHY_CALIB(ANOMALY_05000346_value);
+ SSYNC();
+ }
+
+ if (ANOMALY_05000347) {
+ bfin_write_USB_APHY_CNTRL(0x0);
+ SSYNC();
+ }
+
+ /* TODO
+ * Set SIC-IVG register
+ */
+
+ /* Configure PLL oscillator register */
+ bfin_write_USB_PLLOSC_CTRL(0x30a8);
+ SSYNC();
+
+ bfin_write_USB_SRP_CLKDIV((get_sclk()/1000) / 32 - 1);
+ SSYNC();
+
+ bfin_write_USB_EP_NI0_RXMAXP(64);
+ SSYNC();
+
+ bfin_write_USB_EP_NI0_TXMAXP(64);
+ SSYNC();
+
+ /* Route INTRUSB/INTR_RX/INTR_TX to USB_INT0 */
+ bfin_write_USB_GLOBINTR(0x7);
+ SSYNC();
+
+ bfin_write_USB_GLOBAL_CTL(GLOBAL_ENA | EP1_TX_ENA | EP2_TX_ENA |
+ EP3_TX_ENA | EP4_TX_ENA | EP5_TX_ENA |
+ EP6_TX_ENA | EP7_TX_ENA | EP1_RX_ENA |
+ EP2_RX_ENA | EP3_RX_ENA | EP4_RX_ENA |
+ EP5_RX_ENA | EP6_RX_ENA | EP7_RX_ENA);
+ SSYNC();
+
+ if (is_host_enabled(musb)) {
+ musb->board_set_vbus = bfin_set_vbus;
+ setup_timer(&musb_conn_timer,
+ musb_conn_timer_handler, (unsigned long) musb);
+ }
+ if (is_peripheral_enabled(musb))
+ musb->xceiv.set_power = bfin_set_power;
+
+ musb->isr = blackfin_interrupt;
+
+ return 0;
+}
+
+int musb_platform_suspend(struct musb *musb)
+{
+ return 0;
+}
+
+int musb_platform_resume(struct musb *musb)
+{
+ return 0;
+}
+
+
+int musb_platform_exit(struct musb *musb)
+{
+
+ bfin_vbus_power(musb, 0 /*off*/, 1);
+ gpio_free(musb->config->gpio_vrsel);
+ musb_platform_suspend(musb);
+
+ return 0;
+}
diff --git a/drivers/usb/musb/blackfin.h b/drivers/usb/musb/blackfin.h
new file mode 100644
index 00000000000..a240c1e53d1
--- /dev/null
+++ b/drivers/usb/musb/blackfin.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2007 by Analog Devices, Inc.
+ *
+ * The Inventra Controller Driver for Linux is free software; you
+ * can redistribute it and/or modify it under the terms of the GNU
+ * General Public License version 2 as published by the Free Software
+ * Foundation.
+ */
+
+#ifndef __MUSB_BLACKFIN_H__
+#define __MUSB_BLACKFIN_H__
+
+/*
+ * Blackfin specific definitions
+ */
+
+#undef DUMP_FIFO_DATA
+#ifdef DUMP_FIFO_DATA
+static void dump_fifo_data(u8 *buf, u16 len)
+{
+ u8 *tmp = buf;
+ int i;
+
+ for (i = 0; i < len; i++) {
+ if (!(i % 16) && i)
+ pr_debug("\n");
+ pr_debug("%02x ", *tmp++);
+ }
+ pr_debug("\n");
+}
+#else
+#define dump_fifo_data(buf, len) do {} while (0)
+#endif
+
+#ifdef CONFIG_BF52x
+
+#define USB_DMA_BASE USB_DMA_INTERRUPT
+#define USB_DMAx_CTRL 0x04
+#define USB_DMAx_ADDR_LOW 0x08
+#define USB_DMAx_ADDR_HIGH 0x0C
+#define USB_DMAx_COUNT_LOW 0x10
+#define USB_DMAx_COUNT_HIGH 0x14
+
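+/* Each endpoint's DMA channel has a 0x20-byte register window */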
+#define USB_DMA_REG(ep, reg) (USB_DMA_BASE + 0x20 * ep + reg)
+#endif
+
+/* Almost 1 second */
+#define TIMER_DELAY (1 * HZ)
+
+static struct timer_list musb_conn_timer;
+
+#endif /* __MUSB_BLACKFIN_H__ */
diff --git a/drivers/usb/musb/davinci.c b/drivers/usb/musb/davinci.c
index dfb3bcbe00f..0d566dc5ce0 100644
--- a/drivers/usb/musb/davinci.c
+++ b/drivers/usb/musb/davinci.c
@@ -32,9 +32,9 @@
#include
#include
-#include
-#include
-#include
+#include
+#include
+#include
#include
#include "musb_core.h"
@@ -364,6 +364,12 @@ static irqreturn_t davinci_interrupt(int irq, void *__hci)
return IRQ_HANDLED;
}
+int musb_platform_set_mode(struct musb *musb, u8 mode)
+{
+ /* EVM can't do this (right?) */
+ return -EIO;
+}
+
int __init musb_platform_init(struct musb *musb)
{
void __iomem *tibase = musb->ctrl_base;
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 5280dba9b1f..6c7faacfb53 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -148,7 +148,8 @@ static inline struct musb *dev_to_musb(struct device *dev)
/*-------------------------------------------------------------------------*/
-#ifndef CONFIG_USB_TUSB6010
+#if !defined(CONFIG_USB_TUSB6010) && !defined(CONFIG_BLACKFIN)
+
/*
* Load an endpoint's FIFO
*/
@@ -1124,25 +1125,25 @@ fifo_setup(struct musb *musb, struct musb_hw_ep *hw_ep,
#endif
switch (cfg->style) {
case FIFO_TX:
- musb_writeb(mbase, MUSB_TXFIFOSZ, c_size);
- musb_writew(mbase, MUSB_TXFIFOADD, c_off);
+ musb_write_txfifosz(mbase, c_size);
+ musb_write_txfifoadd(mbase, c_off);
hw_ep->tx_double_buffered = !!(c_size & MUSB_FIFOSZ_DPB);
hw_ep->max_packet_sz_tx = maxpacket;
break;
case FIFO_RX:
- musb_writeb(mbase, MUSB_RXFIFOSZ, c_size);
- musb_writew(mbase, MUSB_RXFIFOADD, c_off);
+ musb_write_rxfifosz(mbase, c_size);
+ musb_write_rxfifoadd(mbase, c_off);
hw_ep->rx_double_buffered = !!(c_size & MUSB_FIFOSZ_DPB);
hw_ep->max_packet_sz_rx = maxpacket;
break;
case FIFO_RXTX:
- musb_writeb(mbase, MUSB_TXFIFOSZ, c_size);
- musb_writew(mbase, MUSB_TXFIFOADD, c_off);
+ musb_write_txfifosz(mbase, c_size);
+ musb_write_txfifoadd(mbase, c_off);
hw_ep->rx_double_buffered = !!(c_size & MUSB_FIFOSZ_DPB);
hw_ep->max_packet_sz_rx = maxpacket;
- musb_writeb(mbase, MUSB_RXFIFOSZ, c_size);
- musb_writew(mbase, MUSB_RXFIFOADD, c_off);
+ musb_write_rxfifosz(mbase, c_size);
+ musb_write_rxfifoadd(mbase, c_off);
hw_ep->tx_double_buffered = hw_ep->rx_double_buffered;
hw_ep->max_packet_sz_tx = maxpacket;
@@ -1212,7 +1213,7 @@ static int __init ep_config_from_table(struct musb *musb)
if (epn >= musb->config->num_eps) {
pr_debug("%s: invalid ep %d\n",
musb_driver_name, epn);
- continue;
+ return -EINVAL;
}
offset = fifo_setup(musb, hw_ep + epn, cfg++, offset);
if (offset < 0) {
@@ -1246,9 +1247,10 @@ static int __init ep_config_from_table(struct musb *musb)
*/
static int __init ep_config_from_hw(struct musb *musb)
{
- u8 epnum = 0, reg;
+ u8 epnum = 0;
struct musb_hw_ep *hw_ep;
void *mbase = musb->mregs;
+ int ret = 0;
DBG(2, "<== static silicon ep config\n");
@@ -1258,26 +1260,9 @@ static int __init ep_config_from_hw(struct musb *musb)
musb_ep_select(mbase, epnum);
hw_ep = musb->endpoints + epnum;
- /* read from core using indexed model */
- reg = musb_readb(hw_ep->regs, 0x10 + MUSB_FIFOSIZE);
- if (!reg) {
- /* 0's returned when no more endpoints */
+ ret = musb_read_fifosize(musb, hw_ep, epnum);
+ if (ret < 0)
break;
- }
- musb->nr_endpoints++;
- musb->epmask |= (1 << epnum);
-
- hw_ep->max_packet_sz_tx = 1 << (reg & 0x0f);
-
- /* shared TX/RX FIFO? */
- if ((reg & 0xf0) == 0xf0) {
- hw_ep->max_packet_sz_rx = hw_ep->max_packet_sz_tx;
- hw_ep->is_shared_fifo = true;
- continue;
- } else {
- hw_ep->max_packet_sz_rx = 1 << ((reg & 0xf0) >> 4);
- hw_ep->is_shared_fifo = false;
- }
/* FIXME set up hw_ep->{rx,tx}_double_buffered */
@@ -1326,7 +1311,7 @@ static int __init musb_core_init(u16 musb_type, struct musb *musb)
/* log core options (read using indexed model) */
musb_ep_select(mbase, 0);
- reg = musb_readb(mbase, 0x10 + MUSB_CONFIGDATA);
+ reg = musb_read_configdata(mbase);
strcpy(aInfo, (reg & MUSB_CONFIGDATA_UTMIDW) ? "UTMI-16" : "UTMI-8");
if (reg & MUSB_CONFIGDATA_DYNFIFO)
@@ -1391,7 +1376,7 @@ static int __init musb_core_init(u16 musb_type, struct musb *musb)
}
/* log release info */
- hwvers = musb_readw(mbase, MUSB_HWVERS);
+ hwvers = musb_read_hwvers(mbase);
rev_major = (hwvers >> 10) & 0x1f;
rev_minor = hwvers & 0x3ff;
snprintf(aRevision, 32, "%d.%d%s", rev_major,
@@ -1400,8 +1385,7 @@ static int __init musb_core_init(u16 musb_type, struct musb *musb)
musb_driver_name, type, aRevision, aDate);
/* configure ep0 */
- musb->endpoints[0].max_packet_sz_tx = MUSB_EP0_FIFOSIZE;
- musb->endpoints[0].max_packet_sz_rx = MUSB_EP0_FIFOSIZE;
+ musb_configure_ep0(musb);
/* discover endpoint configuration */
musb->nr_endpoints = 1;
@@ -1445,7 +1429,7 @@ static int __init musb_core_init(u16 musb_type, struct musb *musb)
hw_ep->regs = MUSB_EP_OFFSET(i, 0) + mbase;
#ifdef CONFIG_USB_MUSB_HDRC_HCD
- hw_ep->target_regs = MUSB_BUSCTL_OFFSET(i, 0) + mbase;
+ hw_ep->target_regs = musb_read_target_reg_base(i, mbase);
hw_ep->rx_reinit = 1;
hw_ep->tx_reinit = 1;
#endif
@@ -1671,17 +1655,20 @@ musb_mode_store(struct device *dev, struct device_attribute *attr,
{
struct musb *musb = dev_to_musb(dev);
unsigned long flags;
+ int status;
spin_lock_irqsave(&musb->lock, flags);
- if (!strncmp(buf, "host", 4))
- musb_platform_set_mode(musb, MUSB_HOST);
- if (!strncmp(buf, "peripheral", 10))
- musb_platform_set_mode(musb, MUSB_PERIPHERAL);
- if (!strncmp(buf, "otg", 3))
- musb_platform_set_mode(musb, MUSB_OTG);
+ if (sysfs_streq(buf, "host"))
+ status = musb_platform_set_mode(musb, MUSB_HOST);
+ else if (sysfs_streq(buf, "peripheral"))
+ status = musb_platform_set_mode(musb, MUSB_PERIPHERAL);
+ else if (sysfs_streq(buf, "otg"))
+ status = musb_platform_set_mode(musb, MUSB_OTG);
+ else
+ status = -EINVAL;
spin_unlock_irqrestore(&musb->lock, flags);
- return n;
+ return (status == 0) ? n : status;
}
static DEVICE_ATTR(mode, 0644, musb_mode_show, musb_mode_store);
@@ -1781,7 +1768,7 @@ allocate_instance(struct device *dev,
#ifdef CONFIG_USB_MUSB_HDRC_HCD
struct usb_hcd *hcd;
- hcd = usb_create_hcd(&musb_hc_driver, dev, dev->bus_id);
+ hcd = usb_create_hcd(&musb_hc_driver, dev, dev_name(dev));
if (!hcd)
return NULL;
/* usbcore sets dev->driver_data to hcd, and sometimes uses that... */
@@ -1810,7 +1797,6 @@ allocate_instance(struct device *dev,
for (epnum = 0, ep = musb->endpoints;
epnum < musb->config->num_eps;
epnum++, ep++) {
-
ep->musb = musb;
ep->epnum = epnum;
}
@@ -1838,7 +1824,7 @@ static void musb_free(struct musb *musb)
musb_gadget_cleanup(musb);
#endif
- if (musb->nIrq >= 0) {
+ if (musb->nIrq >= 0 && musb->irq_wake) {
disable_irq_wake(musb->nIrq);
free_irq(musb->nIrq, musb);
}
@@ -1984,15 +1970,19 @@ bad_config:
INIT_WORK(&musb->irq_work, musb_irq_work);
/* attach to the IRQ */
- if (request_irq(nIrq, musb->isr, 0, dev->bus_id, musb)) {
+ if (request_irq(nIrq, musb->isr, 0, dev_name(dev), musb)) {
dev_err(dev, "request_irq %d failed!\n", nIrq);
status = -ENODEV;
goto fail2;
}
musb->nIrq = nIrq;
/* FIXME this handles wakeup irqs wrong */
- if (enable_irq_wake(nIrq) == 0)
+ if (enable_irq_wake(nIrq) == 0) {
+ musb->irq_wake = 1;
device_init_wakeup(dev, 1);
+ } else {
+ musb->irq_wake = 0;
+ }
pr_info("%s: USB %s mode controller at %p using %s, IRQ %d\n",
musb_driver_name,
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index 82227251931..630946a2d9f 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -191,7 +191,7 @@ enum musb_g_ep0_state {
*/
#if defined(CONFIG_ARCH_DAVINCI) || defined(CONFIG_ARCH_OMAP2430) \
- || defined(CONFIG_ARCH_OMAP3430)
+ || defined(CONFIG_ARCH_OMAP3430) || defined(CONFIG_BLACKFIN)
/* REVISIT indexed access seemed to
* misbehave (on DaVinci) for at least peripheral IN ...
*/
@@ -359,6 +359,7 @@ struct musb {
struct otg_transceiver xceiv;
int nIrq;
+ unsigned irq_wake:1;
struct musb_hw_ep endpoints[MUSB_C_NUM_EPS];
#define control_ep endpoints
@@ -447,6 +448,70 @@ static inline struct musb *gadget_to_musb(struct usb_gadget *g)
}
#endif
+#ifdef CONFIG_BLACKFIN
+static inline int musb_read_fifosize(struct musb *musb,
+ struct musb_hw_ep *hw_ep, u8 epnum)
+{
+ musb->nr_endpoints++;
+ musb->epmask |= (1 << epnum);
+
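+ /* FIFO sizes are fixed here: 128 bytes for EP 1-4, 1024 bytes for EP 5 and up */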
+ if (epnum < 5) {
+ hw_ep->max_packet_sz_tx = 128;
+ hw_ep->max_packet_sz_rx = 128;
+ } else {
+ hw_ep->max_packet_sz_tx = 1024;
+ hw_ep->max_packet_sz_rx = 1024;
+ }
+ hw_ep->is_shared_fifo = false;
+
+ return 0;
+}
+
+static inline void musb_configure_ep0(struct musb *musb)
+{
+ musb->endpoints[0].max_packet_sz_tx = MUSB_EP0_FIFOSIZE;
+ musb->endpoints[0].max_packet_sz_rx = MUSB_EP0_FIFOSIZE;
+ musb->endpoints[0].is_shared_fifo = true;
+}
+
+#else
+
+static inline int musb_read_fifosize(struct musb *musb,
+ struct musb_hw_ep *hw_ep, u8 epnum)
+{
+ u8 reg = 0;
+
+ /* read from core using indexed model */
+ reg = musb_readb(hw_ep->regs, 0x10 + MUSB_FIFOSIZE);
+ /* 0's returned when no more endpoints */
+ if (!reg)
+ return -ENODEV;
+
+ musb->nr_endpoints++;
+ musb->epmask |= (1 << epnum);
+
+ hw_ep->max_packet_sz_tx = 1 << (reg & 0x0f);
+
+ /* shared TX/RX FIFO? */
+ if ((reg & 0xf0) == 0xf0) {
+ hw_ep->max_packet_sz_rx = hw_ep->max_packet_sz_tx;
+ hw_ep->is_shared_fifo = true;
+ return 0;
+ } else {
+ hw_ep->max_packet_sz_rx = 1 << ((reg & 0xf0) >> 4);
+ hw_ep->is_shared_fifo = false;
+ }
+
+ return 0;
+}
+
+static inline void musb_configure_ep0(struct musb *musb)
+{
+ musb->endpoints[0].max_packet_sz_tx = MUSB_EP0_FIFOSIZE;
+ musb->endpoints[0].max_packet_sz_rx = MUSB_EP0_FIFOSIZE;
+}
+#endif /* CONFIG_BLACKFIN */
+
/***************************** Glue it together *****************************/
@@ -467,16 +532,16 @@ extern void musb_platform_disable(struct musb *musb);
extern void musb_hnp_stop(struct musb *musb);
-extern void musb_platform_set_mode(struct musb *musb, u8 musb_mode);
+extern int musb_platform_set_mode(struct musb *musb, u8 musb_mode);
-#if defined(CONFIG_USB_TUSB6010) || \
+#if defined(CONFIG_USB_TUSB6010) || defined(CONFIG_BLACKFIN) || \
defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP34XX)
extern void musb_platform_try_idle(struct musb *musb, unsigned long timeout);
#else
#define musb_platform_try_idle(x, y) do {} while (0)
#endif
-#ifdef CONFIG_USB_TUSB6010
+#if defined(CONFIG_USB_TUSB6010) || defined(CONFIG_BLACKFIN)
extern int musb_platform_get_vbus_status(struct musb *musb);
#else
#define musb_platform_get_vbus_status(x) 0
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index d6a802c224f..6197daeab8f 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -1633,7 +1633,7 @@ int __init musb_gadget_setup(struct musb *musb)
musb->g.speed = USB_SPEED_UNKNOWN;
/* this "gadget" abstracts/virtualizes the controller */
- strcpy(musb->g.dev.bus_id, "gadget");
+ dev_set_name(&musb->g.dev, "gadget");
musb->g.dev.parent = musb->controller;
musb->g.dev.dma_mask = musb->controller->dma_mask;
musb->g.dev.release = musb_gadget_release;
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index cc64462d4c4..99fa6123487 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -112,18 +112,21 @@ static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
{
void __iomem *epio = ep->regs;
u16 csr;
+ u16 lastcsr = 0;
int retries = 1000;
csr = musb_readw(epio, MUSB_TXCSR);
while (csr & MUSB_TXCSR_FIFONOTEMPTY) {
- DBG(5, "Host TX FIFONOTEMPTY csr: %02x\n", csr);
+ if (csr != lastcsr)
+ DBG(3, "Host TX FIFONOTEMPTY csr: %02x\n", csr);
+ lastcsr = csr;
csr |= MUSB_TXCSR_FLUSHFIFO;
musb_writew(epio, MUSB_TXCSR, csr);
csr = musb_readw(epio, MUSB_TXCSR);
- if (retries-- < 1) {
- ERR("Could not flush host TX fifo: csr: %04x\n", csr);
+ if (WARN(retries-- < 1,
+ "Could not flush host TX%d fifo: csr: %04x\n",
+ ep->epnum, csr))
return;
- }
mdelay(1);
}
}
@@ -268,7 +271,7 @@ __musb_giveback(struct musb *musb, struct urb *urb, int status)
__releases(musb->lock)
__acquires(musb->lock)
{
- DBG(({ int level; switch (urb->status) {
+ DBG(({ int level; switch (status) {
case 0:
level = 4;
break;
@@ -283,8 +286,8 @@ __acquires(musb->lock)
level = 2;
break;
}; level; }),
- "complete %p (%d), dev%d ep%d%s, %d/%d\n",
- urb, urb->status,
+ "complete %p %pF (%d), dev%d ep%d%s, %d/%d\n",
+ urb, urb->complete, status,
usb_pipedevice(urb->pipe),
usb_pipeendpoint(urb->pipe),
usb_pipein(urb->pipe) ? "in" : "out",
@@ -593,12 +596,10 @@ musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep)
/* target addr and (for multipoint) hub addr/port */
if (musb->is_multipoint) {
- musb_writeb(ep->target_regs, MUSB_RXFUNCADDR,
- qh->addr_reg);
- musb_writeb(ep->target_regs, MUSB_RXHUBADDR,
- qh->h_addr_reg);
- musb_writeb(ep->target_regs, MUSB_RXHUBPORT,
- qh->h_port_reg);
+ musb_write_rxfunaddr(ep->target_regs, qh->addr_reg);
+ musb_write_rxhubaddr(ep->target_regs, qh->h_addr_reg);
+ musb_write_rxhubport(ep->target_regs, qh->h_port_reg);
+
} else
musb_writeb(musb->mregs, MUSB_FADDR, qh->addr_reg);
@@ -712,15 +713,9 @@ static void musb_ep_program(struct musb *musb, u8 epnum,
/* target addr and (for multipoint) hub addr/port */
if (musb->is_multipoint) {
- musb_writeb(mbase,
- MUSB_BUSCTL_OFFSET(epnum, MUSB_TXFUNCADDR),
- qh->addr_reg);
- musb_writeb(mbase,
- MUSB_BUSCTL_OFFSET(epnum, MUSB_TXHUBADDR),
- qh->h_addr_reg);
- musb_writeb(mbase,
- MUSB_BUSCTL_OFFSET(epnum, MUSB_TXHUBPORT),
- qh->h_port_reg);
+ musb_write_txfunaddr(mbase, epnum, qh->addr_reg);
+ musb_write_txhubaddr(mbase, epnum, qh->h_addr_reg);
+ musb_write_txhubport(mbase, epnum, qh->h_port_reg);
/* FIXME if !epnum, do the same for RX ... */
} else
musb_writeb(mbase, MUSB_FADDR, qh->addr_reg);
@@ -988,8 +983,10 @@ static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
if (fifo_count) {
fifo_dest = (u8 *) (urb->transfer_buffer
+ urb->actual_length);
- DBG(3, "Sending %d bytes to %p\n",
- fifo_count, fifo_dest);
+ DBG(3, "Sending %d byte%s to ep0 fifo %p\n",
+ fifo_count,
+ (fifo_count == 1) ? "" : "s",
+ fifo_dest);
musb_write_fifo(hw_ep, fifo_count, fifo_dest);
urb->actual_length += fifo_count;
diff --git a/drivers/usb/musb/musb_io.h b/drivers/usb/musb/musb_io.h
index 223f0a51409..b06e9ef00cf 100644
--- a/drivers/usb/musb/musb_io.h
+++ b/drivers/usb/musb/musb_io.h
@@ -39,7 +39,7 @@
#if !defined(CONFIG_ARM) && !defined(CONFIG_SUPERH) \
&& !defined(CONFIG_AVR32) && !defined(CONFIG_PPC32) \
- && !defined(CONFIG_PPC64)
+ && !defined(CONFIG_PPC64) && !defined(CONFIG_BLACKFIN)
static inline void readsl(const void __iomem *addr, void *buf, int len)
{ insl((unsigned long)addr, buf, len); }
static inline void readsw(const void __iomem *addr, void *buf, int len)
@@ -56,6 +56,8 @@ static inline void writesb(const void __iomem *addr, const void *buf, int len)
#endif
+#ifndef CONFIG_BLACKFIN
+
/* NOTE: these offsets are all in bytes */
static inline u16 musb_readw(const void __iomem *addr, unsigned offset)
@@ -114,4 +116,26 @@ static inline void musb_writeb(void __iomem *addr, unsigned offset, u8 data)
#endif /* CONFIG_USB_TUSB6010 */
+#else
+
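+/* The 8- and 32-bit accessors are built on 16-bit bfin_read16()/bfin_write16() */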
+static inline u8 musb_readb(const void __iomem *addr, unsigned offset)
+ { return (u8) (bfin_read16(addr + offset)); }
+
+static inline u16 musb_readw(const void __iomem *addr, unsigned offset)
+ { return bfin_read16(addr + offset); }
+
+static inline u32 musb_readl(const void __iomem *addr, unsigned offset)
+ { return (u32) (bfin_read16(addr + offset)); }
+
+static inline void musb_writeb(void __iomem *addr, unsigned offset, u8 data)
+ { bfin_write16(addr + offset, (u16) data); }
+
+static inline void musb_writew(void __iomem *addr, unsigned offset, u16 data)
+ { bfin_write16(addr + offset, data); }
+
+static inline void musb_writel(void __iomem *addr, unsigned offset, u32 data)
+ { bfin_write16(addr + offset, (u16) data); }
+
+#endif /* CONFIG_BLACKFIN */
+
#endif
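
The Blackfin variants above funnel every 8-, 16- and 32-bit MUSB register access through bfin_read16()/bfin_write16(), because the core sits on a 16-bit bus. A minimal user-space sketch of that narrowing/widening pattern follows; the regs[] array and the bus16_*() helpers are invented stand-ins for the Blackfin MMIO accessors, not kernel API.

/* Sketch only: models an MMIO block that is just 16 bits wide. */
#include <stdint.h>
#include <stdio.h>

static uint16_t regs[64];				/* fake register file */

static uint16_t bus16_read(unsigned off)		{ return regs[off >> 1]; }
static void bus16_write(unsigned off, uint16_t v)	{ regs[off >> 1] = v; }

/* 8- and 32-bit accessors are narrowed/widened from the 16-bit bus,
 * like the CONFIG_BLACKFIN musb_readb()/musb_readl() variants above. */
static uint8_t reg_readb(unsigned off)		{ return (uint8_t)bus16_read(off); }
static uint32_t reg_readl(unsigned off)		{ return (uint32_t)bus16_read(off); }
static void reg_writel(unsigned off, uint32_t v)	{ bus16_write(off, (uint16_t)v); }

int main(void)
{
	reg_writel(0x00, 0x12345678);	/* upper 16 bits cannot reach the bus */
	printf("readb=0x%02x readl=0x%08lx\n",
	       (unsigned)reg_readb(0x00), (unsigned long)reg_readl(0x00));
	return 0;
}
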
diff --git a/drivers/usb/musb/musb_regs.h b/drivers/usb/musb/musb_regs.h
index 9c228661aa5..de3b2f18db4 100644
--- a/drivers/usb/musb/musb_regs.h
+++ b/drivers/usb/musb/musb_regs.h
@@ -37,97 +37,6 @@
#define MUSB_EP0_FIFOSIZE 64 /* This is non-configurable */
-/*
- * Common USB registers
- */
-
-#define MUSB_FADDR 0x00 /* 8-bit */
-#define MUSB_POWER 0x01 /* 8-bit */
-
-#define MUSB_INTRTX 0x02 /* 16-bit */
-#define MUSB_INTRRX 0x04
-#define MUSB_INTRTXE 0x06
-#define MUSB_INTRRXE 0x08
-#define MUSB_INTRUSB 0x0A /* 8 bit */
-#define MUSB_INTRUSBE 0x0B /* 8 bit */
-#define MUSB_FRAME 0x0C
-#define MUSB_INDEX 0x0E /* 8 bit */
-#define MUSB_TESTMODE 0x0F /* 8 bit */
-
-/* Get offset for a given FIFO from musb->mregs */
-#ifdef CONFIG_USB_TUSB6010
-#define MUSB_FIFO_OFFSET(epnum) (0x200 + ((epnum) * 0x20))
-#else
-#define MUSB_FIFO_OFFSET(epnum) (0x20 + ((epnum) * 4))
-#endif
-
-/*
- * Additional Control Registers
- */
-
-#define MUSB_DEVCTL 0x60 /* 8 bit */
-
-/* These are always controlled through the INDEX register */
-#define MUSB_TXFIFOSZ 0x62 /* 8-bit (see masks) */
-#define MUSB_RXFIFOSZ 0x63 /* 8-bit (see masks) */
-#define MUSB_TXFIFOADD 0x64 /* 16-bit offset shifted right 3 */
-#define MUSB_RXFIFOADD 0x66 /* 16-bit offset shifted right 3 */
-
-/* REVISIT: vctrl/vstatus: optional vendor utmi+phy register at 0x68 */
-#define MUSB_HWVERS 0x6C /* 8 bit */
-
-#define MUSB_EPINFO 0x78 /* 8 bit */
-#define MUSB_RAMINFO 0x79 /* 8 bit */
-#define MUSB_LINKINFO 0x7a /* 8 bit */
-#define MUSB_VPLEN 0x7b /* 8 bit */
-#define MUSB_HS_EOF1 0x7c /* 8 bit */
-#define MUSB_FS_EOF1 0x7d /* 8 bit */
-#define MUSB_LS_EOF1 0x7e /* 8 bit */
-
-/* Offsets to endpoint registers */
-#define MUSB_TXMAXP 0x00
-#define MUSB_TXCSR 0x02
-#define MUSB_CSR0 MUSB_TXCSR /* Re-used for EP0 */
-#define MUSB_RXMAXP 0x04
-#define MUSB_RXCSR 0x06
-#define MUSB_RXCOUNT 0x08
-#define MUSB_COUNT0 MUSB_RXCOUNT /* Re-used for EP0 */
-#define MUSB_TXTYPE 0x0A
-#define MUSB_TYPE0 MUSB_TXTYPE /* Re-used for EP0 */
-#define MUSB_TXINTERVAL 0x0B
-#define MUSB_NAKLIMIT0 MUSB_TXINTERVAL /* Re-used for EP0 */
-#define MUSB_RXTYPE 0x0C
-#define MUSB_RXINTERVAL 0x0D
-#define MUSB_FIFOSIZE 0x0F
-#define MUSB_CONFIGDATA MUSB_FIFOSIZE /* Re-used for EP0 */
-
-/* Offsets to endpoint registers in indexed model (using INDEX register) */
-#define MUSB_INDEXED_OFFSET(_epnum, _offset) \
- (0x10 + (_offset))
-
-/* Offsets to endpoint registers in flat models */
-#define MUSB_FLAT_OFFSET(_epnum, _offset) \
- (0x100 + (0x10*(_epnum)) + (_offset))
-
-#ifdef CONFIG_USB_TUSB6010
-/* TUSB6010 EP0 configuration register is special */
-#define MUSB_TUSB_OFFSET(_epnum, _offset) \
- (0x10 + _offset)
-#include "tusb6010.h" /* Needed "only" for TUSB_EP0_CONF */
-#endif
-
-/* "bus control"/target registers, for host side multipoint (external hubs) */
-#define MUSB_TXFUNCADDR 0x00
-#define MUSB_TXHUBADDR 0x02
-#define MUSB_TXHUBPORT 0x03
-
-#define MUSB_RXFUNCADDR 0x04
-#define MUSB_RXHUBADDR 0x06
-#define MUSB_RXHUBPORT 0x07
-
-#define MUSB_BUSCTL_OFFSET(_epnum, _offset) \
- (0x80 + (8*(_epnum)) + (_offset))
-
/*
* MUSB Register bits
*/
@@ -228,7 +137,6 @@
/* TXCSR in Peripheral and Host mode */
#define MUSB_TXCSR_AUTOSET 0x8000
-#define MUSB_TXCSR_MODE 0x2000
#define MUSB_TXCSR_DMAENAB 0x1000
#define MUSB_TXCSR_FRCDATATOG 0x0800
#define MUSB_TXCSR_DMAMODE 0x0400
@@ -297,4 +205,309 @@
/* HUBADDR */
#define MUSB_HUBADDR_MULTI_TT 0x80
+
+#ifndef CONFIG_BLACKFIN
+
+/*
+ * Common USB registers
+ */
+
+#define MUSB_FADDR 0x00 /* 8-bit */
+#define MUSB_POWER 0x01 /* 8-bit */
+
+#define MUSB_INTRTX 0x02 /* 16-bit */
+#define MUSB_INTRRX 0x04
+#define MUSB_INTRTXE 0x06
+#define MUSB_INTRRXE 0x08
+#define MUSB_INTRUSB 0x0A /* 8 bit */
+#define MUSB_INTRUSBE 0x0B /* 8 bit */
+#define MUSB_FRAME 0x0C
+#define MUSB_INDEX 0x0E /* 8 bit */
+#define MUSB_TESTMODE 0x0F /* 8 bit */
+
+/* Get offset for a given FIFO from musb->mregs */
+#ifdef CONFIG_USB_TUSB6010
+#define MUSB_FIFO_OFFSET(epnum) (0x200 + ((epnum) * 0x20))
+#else
+#define MUSB_FIFO_OFFSET(epnum) (0x20 + ((epnum) * 4))
+#endif
+
+/*
+ * Additional Control Registers
+ */
+
+#define MUSB_DEVCTL 0x60 /* 8 bit */
+
+/* These are always controlled through the INDEX register */
+#define MUSB_TXFIFOSZ 0x62 /* 8-bit (see masks) */
+#define MUSB_RXFIFOSZ 0x63 /* 8-bit (see masks) */
+#define MUSB_TXFIFOADD 0x64 /* 16-bit offset shifted right 3 */
+#define MUSB_RXFIFOADD 0x66 /* 16-bit offset shifted right 3 */
+
+/* REVISIT: vctrl/vstatus: optional vendor utmi+phy register at 0x68 */
+#define MUSB_HWVERS 0x6C /* 8 bit */
+
+#define MUSB_EPINFO 0x78 /* 8 bit */
+#define MUSB_RAMINFO 0x79 /* 8 bit */
+#define MUSB_LINKINFO 0x7a /* 8 bit */
+#define MUSB_VPLEN 0x7b /* 8 bit */
+#define MUSB_HS_EOF1 0x7c /* 8 bit */
+#define MUSB_FS_EOF1 0x7d /* 8 bit */
+#define MUSB_LS_EOF1 0x7e /* 8 bit */
+
+/* Offsets to endpoint registers */
+#define MUSB_TXMAXP 0x00
+#define MUSB_TXCSR 0x02
+#define MUSB_CSR0 MUSB_TXCSR /* Re-used for EP0 */
+#define MUSB_RXMAXP 0x04
+#define MUSB_RXCSR 0x06
+#define MUSB_RXCOUNT 0x08
+#define MUSB_COUNT0 MUSB_RXCOUNT /* Re-used for EP0 */
+#define MUSB_TXTYPE 0x0A
+#define MUSB_TYPE0 MUSB_TXTYPE /* Re-used for EP0 */
+#define MUSB_TXINTERVAL 0x0B
+#define MUSB_NAKLIMIT0 MUSB_TXINTERVAL /* Re-used for EP0 */
+#define MUSB_RXTYPE 0x0C
+#define MUSB_RXINTERVAL 0x0D
+#define MUSB_FIFOSIZE 0x0F
+#define MUSB_CONFIGDATA MUSB_FIFOSIZE /* Re-used for EP0 */
+
+/* Offsets to endpoint registers in indexed model (using INDEX register) */
+#define MUSB_INDEXED_OFFSET(_epnum, _offset) \
+ (0x10 + (_offset))
+
+/* Offsets to endpoint registers in flat models */
+#define MUSB_FLAT_OFFSET(_epnum, _offset) \
+ (0x100 + (0x10*(_epnum)) + (_offset))
+
+#ifdef CONFIG_USB_TUSB6010
+/* TUSB6010 EP0 configuration register is special */
+#define MUSB_TUSB_OFFSET(_epnum, _offset) \
+ (0x10 + _offset)
+#include "tusb6010.h" /* Needed "only" for TUSB_EP0_CONF */
+#endif
+
+#define MUSB_TXCSR_MODE 0x2000
+
+/* "bus control"/target registers, for host side multipoint (external hubs) */
+#define MUSB_TXFUNCADDR 0x00
+#define MUSB_TXHUBADDR 0x02
+#define MUSB_TXHUBPORT 0x03
+
+#define MUSB_RXFUNCADDR 0x04
+#define MUSB_RXHUBADDR 0x06
+#define MUSB_RXHUBPORT 0x07
+
+#define MUSB_BUSCTL_OFFSET(_epnum, _offset) \
+ (0x80 + (8*(_epnum)) + (_offset))
+
+static inline void musb_write_txfifosz(void __iomem *mbase, u8 c_size)
+{
+ musb_writeb(mbase, MUSB_TXFIFOSZ, c_size);
+}
+
+static inline void musb_write_txfifoadd(void __iomem *mbase, u16 c_off)
+{
+ musb_writew(mbase, MUSB_TXFIFOADD, c_off);
+}
+
+static inline void musb_write_rxfifosz(void __iomem *mbase, u8 c_size)
+{
+ musb_writeb(mbase, MUSB_RXFIFOSZ, c_size);
+}
+
+static inline void musb_write_rxfifoadd(void __iomem *mbase, u16 c_off)
+{
+ musb_writew(mbase, MUSB_RXFIFOADD, c_off);
+}
+
+static inline u8 musb_read_configdata(void __iomem *mbase)
+{
+ return musb_readb(mbase, 0x10 + MUSB_CONFIGDATA);
+}
+
+static inline u16 musb_read_hwvers(void __iomem *mbase)
+{
+ return musb_readw(mbase, MUSB_HWVERS);
+}
+
+static inline void __iomem *musb_read_target_reg_base(u8 i, void __iomem *mbase)
+{
+ return (MUSB_BUSCTL_OFFSET(i, 0) + mbase);
+}
+
+static inline void musb_write_rxfunaddr(void __iomem *ep_target_regs,
+ u8 qh_addr_reg)
+{
+ musb_writeb(ep_target_regs, MUSB_RXFUNCADDR, qh_addr_reg);
+}
+
+static inline void musb_write_rxhubaddr(void __iomem *ep_target_regs,
+ u8 qh_h_addr_reg)
+{
+ musb_writeb(ep_target_regs, MUSB_RXHUBADDR, qh_h_addr_reg);
+}
+
+static inline void musb_write_rxhubport(void __iomem *ep_target_regs,
+ u8 qh_h_port_reg)
+{
+ musb_writeb(ep_target_regs, MUSB_RXHUBPORT, qh_h_port_reg);
+}
+
+static inline void musb_write_txfunaddr(void __iomem *mbase, u8 epnum,
+ u8 qh_addr_reg)
+{
+ musb_writeb(mbase, MUSB_BUSCTL_OFFSET(epnum, MUSB_TXFUNCADDR),
+ qh_addr_reg);
+}
+
+static inline void musb_write_txhubaddr(void __iomem *mbase, u8 epnum,
+ u8 qh_addr_reg)
+{
+ musb_writeb(mbase, MUSB_BUSCTL_OFFSET(epnum, MUSB_TXHUBADDR),
+ qh_addr_reg);
+}
+
+static inline void musb_write_txhubport(void __iomem *mbase, u8 epnum,
+ u8 qh_h_port_reg)
+{
+ musb_writeb(mbase, MUSB_BUSCTL_OFFSET(epnum, MUSB_TXHUBPORT),
+ qh_h_port_reg);
+}
+
+#else /* CONFIG_BLACKFIN */
+
+#define USB_BASE USB_FADDR
+#define USB_OFFSET(reg) (reg - USB_BASE)
+
+/*
+ * Common USB registers
+ */
+#define MUSB_FADDR USB_OFFSET(USB_FADDR) /* 8-bit */
+#define MUSB_POWER USB_OFFSET(USB_POWER) /* 8-bit */
+#define MUSB_INTRTX USB_OFFSET(USB_INTRTX) /* 16-bit */
+#define MUSB_INTRRX USB_OFFSET(USB_INTRRX)
+#define MUSB_INTRTXE USB_OFFSET(USB_INTRTXE)
+#define MUSB_INTRRXE USB_OFFSET(USB_INTRRXE)
+#define MUSB_INTRUSB USB_OFFSET(USB_INTRUSB) /* 8 bit */
+#define MUSB_INTRUSBE USB_OFFSET(USB_INTRUSBE)/* 8 bit */
+#define MUSB_FRAME USB_OFFSET(USB_FRAME)
+#define MUSB_INDEX USB_OFFSET(USB_INDEX) /* 8 bit */
+#define MUSB_TESTMODE USB_OFFSET(USB_TESTMODE)/* 8 bit */
+
+/* Get offset for a given FIFO from musb->mregs */
+#define MUSB_FIFO_OFFSET(epnum) \
+ (USB_OFFSET(USB_EP0_FIFO) + ((epnum) * 8))
+
+/*
+ * Additional Control Registers
+ */
+
+#define MUSB_DEVCTL USB_OFFSET(USB_OTG_DEV_CTL) /* 8 bit */
+
+#define MUSB_LINKINFO USB_OFFSET(USB_LINKINFO)/* 8 bit */
+#define MUSB_VPLEN USB_OFFSET(USB_VPLEN) /* 8 bit */
+#define MUSB_HS_EOF1 USB_OFFSET(USB_HS_EOF1) /* 8 bit */
+#define MUSB_FS_EOF1 USB_OFFSET(USB_FS_EOF1) /* 8 bit */
+#define MUSB_LS_EOF1 USB_OFFSET(USB_LS_EOF1) /* 8 bit */
+
+/* Offsets to endpoint registers */
+#define MUSB_TXMAXP 0x00
+#define MUSB_TXCSR 0x04
+#define MUSB_CSR0 MUSB_TXCSR /* Re-used for EP0 */
+#define MUSB_RXMAXP 0x08
+#define MUSB_RXCSR 0x0C
+#define MUSB_RXCOUNT 0x10
+#define MUSB_COUNT0 MUSB_RXCOUNT /* Re-used for EP0 */
+#define MUSB_TXTYPE 0x14
+#define MUSB_TYPE0 MUSB_TXTYPE /* Re-used for EP0 */
+#define MUSB_TXINTERVAL 0x18
+#define MUSB_NAKLIMIT0 MUSB_TXINTERVAL /* Re-used for EP0 */
+#define MUSB_RXTYPE 0x1C
+#define MUSB_RXINTERVAL 0x20
+#define MUSB_TXCOUNT 0x28
+
+/* Offsets to endpoint registers in indexed model (using INDEX register) */
+#define MUSB_INDEXED_OFFSET(_epnum, _offset) \
+ (0x40 + (_offset))
+
+/* Offsets to endpoint registers in flat models */
+#define MUSB_FLAT_OFFSET(_epnum, _offset) \
+ (USB_OFFSET(USB_EP_NI0_TXMAXP) + (0x40 * (_epnum)) + (_offset))
+
+/* Not implemented - HW has separate Tx/Rx FIFO */
+#define MUSB_TXCSR_MODE 0x0000
+
+/*
+ * Dummy stubs for the clk framework; they will be removed
+ * once Blackfin supports the clk framework
+ */
+#define clk_get(dev, id) NULL
+#define clk_put(clock) do {} while (0)
+#define clk_enable(clock) do {} while (0)
+#define clk_disable(clock) do {} while (0)
+
+static inline void musb_write_txfifosz(void __iomem *mbase, u8 c_size)
+{
+}
+
+static inline void musb_write_txfifoadd(void __iomem *mbase, u16 c_off)
+{
+}
+
+static inline void musb_write_rxfifosz(void __iomem *mbase, u8 c_size)
+{
+}
+
+static inline void musb_write_rxfifoadd(void __iomem *mbase, u16 c_off)
+{
+}
+
+static inline u8 musb_read_configdata(void __iomem *mbase)
+{
+ return 0;
+}
+
+static inline u16 musb_read_hwvers(void __iomem *mbase)
+{
+ return 0;
+}
+
+static inline u16 musb_read_target_reg_base(u8 i, void __iomem *mbase)
+{
+ return 0;
+}
+
+static inline void musb_write_rxfunaddr(void __iomem *ep_target_regs,
+ u8 qh_addr_reg)
+{
+}
+
+static inline void musb_write_rxhubaddr(void __iomem *ep_target_regs,
+ u8 qh_h_addr_reg)
+{
+}
+
+static inline void musb_write_rxhubport(void __iomem *ep_target_regs,
+ u8 qh_h_port_reg)
+{
+}
+
+static inline void musb_write_txfunaddr(void __iomem *mbase, u8 epnum,
+ u8 qh_addr_reg)
+{
+}
+
+static inline void musb_write_txhubaddr(void __iomem *mbase, u8 epnum,
+ u8 qh_addr_reg)
+{
+}
+
+static inline void musb_write_txhubport(void __iomem *mbase, u8 epnum,
+ u8 qh_h_port_reg)
+{
+}
+
+#endif /* CONFIG_BLACKFIN */
+
#endif /* __MUSB_REGS_H__ */
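
A side effect of the #ifndef CONFIG_BLACKFIN split above is that core code can keep calling the FIFO-sizing helpers unconditionally: on Blackfin, where the FIFO layout is fixed in hardware, musb_write_txfifosz() and friends compile to empty inlines. The sketch below illustrates that pattern in plain C; fifo_setup(), write_txfifosz() and write_txfifoadd() are invented names, loosely modelled on the helpers above.

#include <stdint.h>
#include <stdio.h>

/* Define FIXED_FIFOS to model a platform whose FIFO layout is fixed in HW. */
#ifndef FIXED_FIFOS
static void write_txfifosz(uint8_t c_size)	{ printf("fifo size  <- %#x\n", (unsigned)c_size); }
static void write_txfifoadd(uint16_t c_off)	{ printf("fifo start <- %#x\n", (unsigned)c_off); }
#else
static void write_txfifosz(uint8_t c_size)	{ (void)c_size; }	/* no-op */
static void write_txfifoadd(uint16_t c_off)	{ (void)c_off; }	/* no-op */
#endif

/* The caller stays free of #ifdefs; the platform header decides what happens. */
static void fifo_setup(uint8_t c_size, uint16_t c_off)
{
	write_txfifosz(c_size);
	write_txfifoadd(c_off);
}

int main(void)
{
	fifo_setup(0x06, 0x40);		/* values here are arbitrary */
	return 0;
}
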
diff --git a/drivers/usb/musb/musbhsdma.c b/drivers/usb/musb/musbhsdma.c
index 8c734ef2c1e..8662e9e159c 100644
--- a/drivers/usb/musb/musbhsdma.c
+++ b/drivers/usb/musb/musbhsdma.c
@@ -34,58 +34,7 @@
#include
#include
#include "musb_core.h"
-
-#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3430)
-#include "omap2430.h"
-#endif
-
-#define MUSB_HSDMA_BASE 0x200
-#define MUSB_HSDMA_INTR (MUSB_HSDMA_BASE + 0)
-#define MUSB_HSDMA_CONTROL 0x4
-#define MUSB_HSDMA_ADDRESS 0x8
-#define MUSB_HSDMA_COUNT 0xc
-
-#define MUSB_HSDMA_CHANNEL_OFFSET(_bchannel, _offset) \
- (MUSB_HSDMA_BASE + (_bchannel << 4) + _offset)
-
-/* control register (16-bit): */
-#define MUSB_HSDMA_ENABLE_SHIFT 0
-#define MUSB_HSDMA_TRANSMIT_SHIFT 1
-#define MUSB_HSDMA_MODE1_SHIFT 2
-#define MUSB_HSDMA_IRQENABLE_SHIFT 3
-#define MUSB_HSDMA_ENDPOINT_SHIFT 4
-#define MUSB_HSDMA_BUSERROR_SHIFT 8
-#define MUSB_HSDMA_BURSTMODE_SHIFT 9
-#define MUSB_HSDMA_BURSTMODE (3 << MUSB_HSDMA_BURSTMODE_SHIFT)
-#define MUSB_HSDMA_BURSTMODE_UNSPEC 0
-#define MUSB_HSDMA_BURSTMODE_INCR4 1
-#define MUSB_HSDMA_BURSTMODE_INCR8 2
-#define MUSB_HSDMA_BURSTMODE_INCR16 3
-
-#define MUSB_HSDMA_CHANNELS 8
-
-struct musb_dma_controller;
-
-struct musb_dma_channel {
- struct dma_channel channel;
- struct musb_dma_controller *controller;
- u32 start_addr;
- u32 len;
- u16 max_packet_sz;
- u8 idx;
- u8 epnum;
- u8 transmit;
-};
-
-struct musb_dma_controller {
- struct dma_controller controller;
- struct musb_dma_channel channel[MUSB_HSDMA_CHANNELS];
- void *private_data;
- void __iomem *base;
- u8 channel_count;
- u8 used_channels;
- u8 irq;
-};
+#include "musbhsdma.h"
static int dma_controller_start(struct dma_controller *c)
{
@@ -203,12 +152,8 @@ static void configure_channel(struct dma_channel *channel,
: 0);
/* address/count */
- musb_writel(mbase,
- MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_ADDRESS),
- dma_addr);
- musb_writel(mbase,
- MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT),
- len);
+ musb_write_hsdma_addr(mbase, bchannel, dma_addr);
+ musb_write_hsdma_count(mbase, bchannel, len);
/* control (this should start things) */
musb_writew(mbase,
@@ -279,13 +224,8 @@ static int dma_channel_abort(struct dma_channel *channel)
musb_writew(mbase,
MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_CONTROL),
0);
- musb_writel(mbase,
- MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_ADDRESS),
- 0);
- musb_writel(mbase,
- MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT),
- 0);
-
+ musb_write_hsdma_addr(mbase, bchannel, 0);
+ musb_write_hsdma_count(mbase, bchannel, 0);
channel->status = MUSB_DMA_STATUS_FREE;
}
@@ -333,10 +273,8 @@ static irqreturn_t dma_controller_irq(int irq, void *private_data)
} else {
u8 devctl;
- addr = musb_readl(mbase,
- MUSB_HSDMA_CHANNEL_OFFSET(
- bchannel,
- MUSB_HSDMA_ADDRESS));
+ addr = musb_read_hsdma_addr(mbase,
+ bchannel);
channel->actual_len = addr
- musb_channel->start_addr;
@@ -375,6 +313,12 @@ static irqreturn_t dma_controller_irq(int irq, void *private_data)
}
}
}
+
+#ifdef CONFIG_BLACKFIN
+ /* Clear DMA interrupt flags */
+ musb_writeb(mbase, MUSB_HSDMA_INTR, int_hsdma);
+#endif
+
retval = IRQ_HANDLED;
done:
spin_unlock_irqrestore(&musb->lock, flags);
@@ -424,7 +368,7 @@ dma_controller_create(struct musb *musb, void __iomem *base)
controller->controller.channel_abort = dma_channel_abort;
if (request_irq(irq, dma_controller_irq, IRQF_DISABLED,
- musb->controller->bus_id, &controller->controller)) {
+ dev_name(musb->controller), &controller->controller)) {
dev_err(dev, "request_irq %d failed!\n", irq);
dma_controller_destroy(&controller->controller);
diff --git a/drivers/usb/musb/musbhsdma.h b/drivers/usb/musb/musbhsdma.h
new file mode 100644
index 00000000000..1299d92dc83
--- /dev/null
+++ b/drivers/usb/musb/musbhsdma.h
@@ -0,0 +1,149 @@
+/*
+ * MUSB OTG driver - support for Mentor's DMA controller
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2007 by Texas Instruments
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3430)
+#include "omap2430.h"
+#endif
+
+#ifndef CONFIG_BLACKFIN
+
+#define MUSB_HSDMA_BASE 0x200
+#define MUSB_HSDMA_INTR (MUSB_HSDMA_BASE + 0)
+#define MUSB_HSDMA_CONTROL 0x4
+#define MUSB_HSDMA_ADDRESS 0x8
+#define MUSB_HSDMA_COUNT 0xc
+
+#define MUSB_HSDMA_CHANNEL_OFFSET(_bchannel, _offset) \
+ (MUSB_HSDMA_BASE + (_bchannel << 4) + _offset)
+
+#define musb_read_hsdma_addr(mbase, bchannel) \
+ musb_readl(mbase, \
+ MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_ADDRESS))
+
+#define musb_write_hsdma_addr(mbase, bchannel, addr) \
+ musb_writel(mbase, \
+ MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_ADDRESS), \
+ addr)
+
+#define musb_write_hsdma_count(mbase, bchannel, len) \
+ musb_writel(mbase, \
+ MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT), \
+ len)
+#else
+
+#define MUSB_HSDMA_BASE 0x400
+#define MUSB_HSDMA_INTR (MUSB_HSDMA_BASE + 0)
+#define MUSB_HSDMA_CONTROL 0x04
+#define MUSB_HSDMA_ADDR_LOW 0x08
+#define MUSB_HSDMA_ADDR_HIGH 0x0C
+#define MUSB_HSDMA_COUNT_LOW 0x10
+#define MUSB_HSDMA_COUNT_HIGH 0x14
+
+#define MUSB_HSDMA_CHANNEL_OFFSET(_bchannel, _offset) \
+ (MUSB_HSDMA_BASE + (_bchannel * 0x20) + _offset)
+
+static inline u32 musb_read_hsdma_addr(void __iomem *mbase, u8 bchannel)
+{
+ u32 addr = musb_readw(mbase,
+ MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_ADDR_HIGH));
+
+ addr = addr << 16;
+
+ addr |= musb_readw(mbase,
+ MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_ADDR_LOW));
+
+ return addr;
+}
+
+static inline void musb_write_hsdma_addr(void __iomem *mbase,
+ u8 bchannel, dma_addr_t dma_addr)
+{
+ musb_writew(mbase,
+ MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_ADDR_LOW),
+ ((u16)((u32) dma_addr & 0xFFFF)));
+ musb_writew(mbase,
+ MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_ADDR_HIGH),
+ ((u16)(((u32) dma_addr >> 16) & 0xFFFF)));
+}
+
+static inline void musb_write_hsdma_count(void __iomem *mbase,
+ u8 bchannel, u32 len)
+{
+ musb_writew(mbase,
+ MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT_LOW),
+ ((u16)((u32) len & 0xFFFF)));
+ musb_writew(mbase,
+ MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT_HIGH),
+ ((u16)(((u32) len >> 16) & 0xFFFF)));
+}
+
+#endif /* CONFIG_BLACKFIN */
+
+/* control register (16-bit): */
+#define MUSB_HSDMA_ENABLE_SHIFT 0
+#define MUSB_HSDMA_TRANSMIT_SHIFT 1
+#define MUSB_HSDMA_MODE1_SHIFT 2
+#define MUSB_HSDMA_IRQENABLE_SHIFT 3
+#define MUSB_HSDMA_ENDPOINT_SHIFT 4
+#define MUSB_HSDMA_BUSERROR_SHIFT 8
+#define MUSB_HSDMA_BURSTMODE_SHIFT 9
+#define MUSB_HSDMA_BURSTMODE (3 << MUSB_HSDMA_BURSTMODE_SHIFT)
+#define MUSB_HSDMA_BURSTMODE_UNSPEC 0
+#define MUSB_HSDMA_BURSTMODE_INCR4 1
+#define MUSB_HSDMA_BURSTMODE_INCR8 2
+#define MUSB_HSDMA_BURSTMODE_INCR16 3
+
+#define MUSB_HSDMA_CHANNELS 8
+
+struct musb_dma_controller;
+
+struct musb_dma_channel {
+ struct dma_channel channel;
+ struct musb_dma_controller *controller;
+ u32 start_addr;
+ u32 len;
+ u16 max_packet_sz;
+ u8 idx;
+ u8 epnum;
+ u8 transmit;
+};
+
+struct musb_dma_controller {
+ struct dma_controller controller;
+ struct musb_dma_channel channel[MUSB_HSDMA_CHANNELS];
+ void *private_data;
+ void __iomem *base;
+ u8 channel_count;
+ u8 used_channels;
+ u8 irq;
+};
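
On Blackfin the HSDMA address and count registers are 16-bit pairs, so the inlines above split each 32-bit value into low/high halves and reassemble them on read. A self-contained round-trip sketch of that split follows; reg16[] and the ADDR_LOW/ADDR_HIGH indices are invented stand-ins for the real register offsets.

#include <assert.h>
#include <stdint.h>

static uint16_t reg16[2];			/* fake ADDR_LOW/ADDR_HIGH pair */
enum { ADDR_LOW = 0, ADDR_HIGH = 1 };

static void write_hsdma_addr(uint32_t dma_addr)
{
	reg16[ADDR_LOW]  = (uint16_t)(dma_addr & 0xFFFF);
	reg16[ADDR_HIGH] = (uint16_t)((dma_addr >> 16) & 0xFFFF);
}

static uint32_t read_hsdma_addr(void)
{
	return ((uint32_t)reg16[ADDR_HIGH] << 16) | reg16[ADDR_LOW];
}

int main(void)
{
	write_hsdma_addr(0x12345678u);
	assert(read_hsdma_addr() == 0x12345678u);	/* lossless round trip */
	return 0;
}
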
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index ce6c162920f..901dffdf23b 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -58,10 +58,10 @@ static void musb_do_idle(unsigned long _musb)
#endif
u8 devctl;
- devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
-
spin_lock_irqsave(&musb->lock, flags);
+ devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+
switch (musb->xceiv.state) {
case OTG_STATE_A_WAIT_BCON:
devctl &= ~MUSB_DEVCTL_SESSION;
@@ -196,7 +196,7 @@ static int omap_set_power(struct otg_transceiver *x, unsigned mA)
static int musb_platform_resume(struct musb *musb);
-void musb_platform_set_mode(struct musb *musb, u8 musb_mode)
+int musb_platform_set_mode(struct musb *musb, u8 musb_mode)
{
u8 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
@@ -204,15 +204,24 @@ void musb_platform_set_mode(struct musb *musb, u8 musb_mode)
musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
switch (musb_mode) {
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
case MUSB_HOST:
otg_set_host(&musb->xceiv, musb->xceiv.host);
break;
+#endif
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
case MUSB_PERIPHERAL:
otg_set_peripheral(&musb->xceiv, musb->xceiv.gadget);
break;
+#endif
+#ifdef CONFIG_USB_MUSB_OTG
case MUSB_OTG:
break;
+#endif
+ default:
+ return -EINVAL;
}
+ return 0;
}
int __init musb_platform_init(struct musb *musb)
diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c
index ee8fca92a4a..9e20fd070d7 100644
--- a/drivers/usb/musb/tusb6010.c
+++ b/drivers/usb/musb/tusb6010.c
@@ -598,7 +598,7 @@ static void tusb_source_power(struct musb *musb, int is_on)
* and peripheral modes in non-OTG configurations by reconfiguring hardware
* and then setting musb->board_mode. For now, only support OTG mode.
*/
-void musb_platform_set_mode(struct musb *musb, u8 musb_mode)
+int musb_platform_set_mode(struct musb *musb, u8 musb_mode)
{
void __iomem *tbase = musb->ctrl_base;
u32 otg_stat, phy_otg_ctrl, phy_otg_ena, dev_conf;
@@ -641,7 +641,8 @@ void musb_platform_set_mode(struct musb *musb, u8 musb_mode)
#endif
default:
- DBG(2, "Trying to set unknown mode %i\n", musb_mode);
+ DBG(2, "Trying to set mode %i\n", musb_mode);
+ return -EINVAL;
}
musb_writel(tbase, TUSB_PHY_OTG_CTRL,
@@ -655,6 +656,8 @@ void musb_platform_set_mode(struct musb *musb, u8 musb_mode)
!(otg_stat & TUSB_DEV_OTG_STAT_ID_STATUS))
INFO("Cannot be peripheral with mini-A cable "
"otg_stat: %08x\n", otg_stat);
+
+ return 0;
}
static inline unsigned long
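
With musb_platform_set_mode() now returning an int, callers can propagate -EINVAL for unsupported modes instead of silently ignoring them. A toy sketch of that calling convention follows; the enum values and set_mode() are invented stand-ins for MUSB_HOST/MUSB_PERIPHERAL/MUSB_OTG and the platform hook.

#include <errno.h>
#include <stdio.h>

enum { MODE_HOST, MODE_PERIPHERAL, MODE_OTG, MODE_BOGUS };

static int set_mode(int mode)
{
	switch (mode) {
	case MODE_HOST:
	case MODE_PERIPHERAL:
	case MODE_OTG:
		return 0;
	default:
		return -EINVAL;		/* report the failure, don't swallow it */
	}
}

int main(void)
{
	printf("otg: %d, bogus: %d\n", set_mode(MODE_OTG), set_mode(MODE_BOGUS));
	return 0;
}
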
diff --git a/drivers/usb/otg/Kconfig b/drivers/usb/otg/Kconfig
new file mode 100644
index 00000000000..8e8dbdb9b39
--- /dev/null
+++ b/drivers/usb/otg/Kconfig
@@ -0,0 +1,54 @@
+#
+# USB OTG infrastructure may be needed for peripheral-only, host-only,
+# or OTG-capable configurations when OTG transceivers or controllers
+# are used.
+#
+
+comment "OTG and related infrastructure"
+
+if USB || USB_GADGET
+
+config USB_OTG_UTILS
+ bool
+ help
+ Select this to make sure the build includes objects from
+ the OTG infrastructure directory.
+
+#
+# USB Transceiver Drivers
+#
+config USB_GPIO_VBUS
+ tristate "GPIO based peripheral-only VBUS sensing 'transceiver'"
+ depends on GENERIC_GPIO
+ select USB_OTG_UTILS
+ help
+ Provides simple GPIO VBUS sensing for controllers with an
+ internal transceiver via the otg_transceiver interface, and
+ optional control of a D+ pullup GPIO as well as a VBUS
+ current limit regulator.
+
+config ISP1301_OMAP
+ tristate "Philips ISP1301 with OMAP OTG"
+ depends on I2C && ARCH_OMAP_OTG
+ select USB_OTG_UTILS
+ help
+ If you say yes here you get support for the Philips ISP1301
+ USB-On-The-Go transceiver working with the OMAP OTG controller.
+ The ISP1301 is a full speed USB transceiver which is used in
+ products including H2, H3, and H4 development boards for Texas
+ Instruments OMAP processors.
+
+ This driver can also be built as a module. If so, the module
+ will be called isp1301_omap.
+
+config TWL4030_USB
+ tristate "TWL4030 USB Transceiver Driver"
+ depends on TWL4030_CORE
+ select USB_OTG_UTILS
+ help
+ Enable this to support the USB OTG transceiver on TWL4030
+ family chips (including the TWL5030 and TPS659x0 devices).
+ This transceiver supports high and full speed devices plus,
+ in host mode, low speed.
+
+endif # USB || USB_GADGET
diff --git a/drivers/usb/otg/Makefile b/drivers/usb/otg/Makefile
new file mode 100644
index 00000000000..d73c7cf5e2f
--- /dev/null
+++ b/drivers/usb/otg/Makefile
@@ -0,0 +1,15 @@
+#
+# OTG infrastructure and transceiver drivers
+#
+
+# infrastructure
+obj-$(CONFIG_USB_OTG_UTILS) += otg.o
+
+# transceiver drivers
+obj-$(CONFIG_USB_GPIO_VBUS) += gpio_vbus.o
+obj-$(CONFIG_ISP1301_OMAP) += isp1301_omap.o
+obj-$(CONFIG_TWL4030_USB) += twl4030-usb.o
+
+ccflags-$(CONFIG_USB_DEBUG) += -DDEBUG
+ccflags-$(CONFIG_USB_GADGET_DEBUG) += -DDEBUG
+
diff --git a/drivers/usb/otg/gpio_vbus.c b/drivers/usb/otg/gpio_vbus.c
new file mode 100644
index 00000000000..63a6036f04b
--- /dev/null
+++ b/drivers/usb/otg/gpio_vbus.c
@@ -0,0 +1,335 @@
+/*
+ * gpio-vbus.c - simple GPIO VBUS sensing driver for B peripheral devices
+ *
+ * Copyright (c) 2008 Philipp Zabel
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include
+#include
+#include
+#include
+#include
+
+#include
+
+#include
+#include
+#include
+
+
+/*
+ * A simple GPIO VBUS sensing driver for B peripheral only devices
+ * with internal transceivers. It can control a D+ pullup GPIO and
+ * a regulator to limit the current drawn from VBUS.
+ *
+ * Needs to be loaded before the UDC driver that will use it.
+ */
+struct gpio_vbus_data {
+ struct otg_transceiver otg;
+ struct device *dev;
+ struct regulator *vbus_draw;
+ int vbus_draw_enabled;
+ unsigned mA;
+};
+
+
+/*
+ * This driver relies on "both edges" triggering. VBUS has 100 msec to
+ * stabilize, so the peripheral controller driver may need to cope with
+ * some bouncing due to current surges (e.g. charging local capacitance)
+ * and contact chatter.
+ *
+ * REVISIT in desperate straits, toggling between rising and falling
+ * edges might be workable.
+ */
+#define VBUS_IRQ_FLAGS \
+ ( IRQF_SAMPLE_RANDOM | IRQF_SHARED \
+ | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING )
+
+
+/* interface to regulator framework */
+static void set_vbus_draw(struct gpio_vbus_data *gpio_vbus, unsigned mA)
+{
+ struct regulator *vbus_draw = gpio_vbus->vbus_draw;
+ int enabled;
+
+ if (!vbus_draw)
+ return;
+
+ enabled = gpio_vbus->vbus_draw_enabled;
+ if (mA) {
+ regulator_set_current_limit(vbus_draw, 0, 1000 * mA);
+ if (!enabled) {
+ regulator_enable(vbus_draw);
+ gpio_vbus->vbus_draw_enabled = 1;
+ }
+ } else {
+ if (enabled) {
+ regulator_disable(vbus_draw);
+ gpio_vbus->vbus_draw_enabled = 0;
+ }
+ }
+ gpio_vbus->mA = mA;
+}
+
+/* VBUS change IRQ handler */
+static irqreturn_t gpio_vbus_irq(int irq, void *data)
+{
+ struct platform_device *pdev = data;
+ struct gpio_vbus_mach_info *pdata = pdev->dev.platform_data;
+ struct gpio_vbus_data *gpio_vbus = platform_get_drvdata(pdev);
+ int gpio, vbus;
+
+ vbus = gpio_get_value(pdata->gpio_vbus);
+ if (pdata->gpio_vbus_inverted)
+ vbus = !vbus;
+
+ dev_dbg(&pdev->dev, "VBUS %s (gadget: %s)\n",
+ vbus ? "supplied" : "inactive",
+ gpio_vbus->otg.gadget ? gpio_vbus->otg.gadget->name : "none");
+
+ if (!gpio_vbus->otg.gadget)
+ return IRQ_HANDLED;
+
+ /* Peripheral controllers which manage the pullup themselves won't have
+ * gpio_pullup configured here. If it's configured here, we'll do what
+ * isp1301_omap::b_peripheral() does and enable the pullup here... although
+ * that may complicate usb_gadget_{,dis}connect() support.
+ */
+ gpio = pdata->gpio_pullup;
+ if (vbus) {
+ gpio_vbus->otg.state = OTG_STATE_B_PERIPHERAL;
+ usb_gadget_vbus_connect(gpio_vbus->otg.gadget);
+
+ /* drawing a "unit load" is *always* OK, except for OTG */
+ set_vbus_draw(gpio_vbus, 100);
+
+ /* optionally enable D+ pullup */
+ if (gpio_is_valid(gpio))
+ gpio_set_value(gpio, !pdata->gpio_pullup_inverted);
+ } else {
+ /* optionally disable D+ pullup */
+ if (gpio_is_valid(gpio))
+ gpio_set_value(gpio, pdata->gpio_pullup_inverted);
+
+ set_vbus_draw(gpio_vbus, 0);
+
+ usb_gadget_vbus_disconnect(gpio_vbus->otg.gadget);
+ gpio_vbus->otg.state = OTG_STATE_B_IDLE;
+ }
+
+ return IRQ_HANDLED;
+}
+
+/* OTG transceiver interface */
+
+/* bind/unbind the peripheral controller */
+static int gpio_vbus_set_peripheral(struct otg_transceiver *otg,
+ struct usb_gadget *gadget)
+{
+ struct gpio_vbus_data *gpio_vbus;
+ struct gpio_vbus_mach_info *pdata;
+ struct platform_device *pdev;
+ int gpio, irq;
+
+ gpio_vbus = container_of(otg, struct gpio_vbus_data, otg);
+ pdev = to_platform_device(gpio_vbus->dev);
+ pdata = gpio_vbus->dev->platform_data;
+ irq = gpio_to_irq(pdata->gpio_vbus);
+ gpio = pdata->gpio_pullup;
+
+ if (!gadget) {
+ dev_dbg(&pdev->dev, "unregistering gadget '%s'\n",
+ otg->gadget->name);
+
+ /* optionally disable D+ pullup */
+ if (gpio_is_valid(gpio))
+ gpio_set_value(gpio, pdata->gpio_pullup_inverted);
+
+ set_vbus_draw(gpio_vbus, 0);
+
+ usb_gadget_vbus_disconnect(otg->gadget);
+ otg->state = OTG_STATE_UNDEFINED;
+
+ otg->gadget = NULL;
+ return 0;
+ }
+
+ otg->gadget = gadget;
+ dev_dbg(&pdev->dev, "registered gadget '%s'\n", gadget->name);
+
+ /* initialize connection state */
+ gpio_vbus_irq(irq, pdev);
+ return 0;
+}
+
+/* effective for B devices, ignored for A-peripheral */
+static int gpio_vbus_set_power(struct otg_transceiver *otg, unsigned mA)
+{
+ struct gpio_vbus_data *gpio_vbus;
+
+ gpio_vbus = container_of(otg, struct gpio_vbus_data, otg);
+
+ if (otg->state == OTG_STATE_B_PERIPHERAL)
+ set_vbus_draw(gpio_vbus, mA);
+ return 0;
+}
+
+/* for non-OTG B devices: set/clear transceiver suspend mode */
+static int gpio_vbus_set_suspend(struct otg_transceiver *otg, int suspend)
+{
+ struct gpio_vbus_data *gpio_vbus;
+
+ gpio_vbus = container_of(otg, struct gpio_vbus_data, otg);
+
+ /* draw max 0 mA from vbus in suspend mode; or the previously
+ * recorded amount of current if not suspended
+ *
+ * NOTE: high powered configs (mA > 100) may draw up to 2.5 mA
+ * if they're wake-enabled ... we don't handle that yet.
+ */
+ return gpio_vbus_set_power(otg, suspend ? 0 : gpio_vbus->mA);
+}
+
+/* platform driver interface */
+
+static int __init gpio_vbus_probe(struct platform_device *pdev)
+{
+ struct gpio_vbus_mach_info *pdata = pdev->dev.platform_data;
+ struct gpio_vbus_data *gpio_vbus;
+ struct resource *res;
+ int err, gpio, irq;
+
+ if (!pdata || !gpio_is_valid(pdata->gpio_vbus))
+ return -EINVAL;
+ gpio = pdata->gpio_vbus;
+
+ gpio_vbus = kzalloc(sizeof(struct gpio_vbus_data), GFP_KERNEL);
+ if (!gpio_vbus)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, gpio_vbus);
+ gpio_vbus->dev = &pdev->dev;
+ gpio_vbus->otg.label = "gpio-vbus";
+ gpio_vbus->otg.state = OTG_STATE_UNDEFINED;
+ gpio_vbus->otg.set_peripheral = gpio_vbus_set_peripheral;
+ gpio_vbus->otg.set_power = gpio_vbus_set_power;
+ gpio_vbus->otg.set_suspend = gpio_vbus_set_suspend;
+
+ err = gpio_request(gpio, "vbus_detect");
+ if (err) {
+ dev_err(&pdev->dev, "can't request vbus gpio %d, err: %d\n",
+ gpio, err);
+ goto err_gpio;
+ }
+ gpio_direction_input(gpio);
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (res) {
+ irq = res->start;
+ res->flags &= IRQF_TRIGGER_MASK;
+ res->flags |= IRQF_SAMPLE_RANDOM | IRQF_SHARED;
+ } else
+ irq = gpio_to_irq(gpio);
+
+ /* if data line pullup is in use, initialize it to "not pulling up" */
+ gpio = pdata->gpio_pullup;
+ if (gpio_is_valid(gpio)) {
+ err = gpio_request(gpio, "udc_pullup");
+ if (err) {
+ dev_err(&pdev->dev,
+ "can't request pullup gpio %d, err: %d\n",
+ gpio, err);
+ gpio_free(pdata->gpio_vbus);
+ goto err_gpio;
+ }
+ gpio_direction_output(gpio, pdata->gpio_pullup_inverted);
+ }
+
+ err = request_irq(irq, gpio_vbus_irq, VBUS_IRQ_FLAGS,
+ "vbus_detect", pdev);
+ if (err) {
+ dev_err(&pdev->dev, "can't request irq %i, err: %d\n",
+ irq, err);
+ goto err_irq;
+ }
+
+ /* only active when a gadget is registered */
+ err = otg_set_transceiver(&gpio_vbus->otg);
+ if (err) {
+ dev_err(&pdev->dev, "can't register transceiver, err: %d\n",
+ err);
+ goto err_otg;
+ }
+
+ gpio_vbus->vbus_draw = regulator_get(&pdev->dev, "vbus_draw");
+ if (IS_ERR(gpio_vbus->vbus_draw)) {
+ dev_dbg(&pdev->dev, "can't get vbus_draw regulator, err: %ld\n",
+ PTR_ERR(gpio_vbus->vbus_draw));
+ gpio_vbus->vbus_draw = NULL;
+ }
+
+ return 0;
+err_otg:
+ free_irq(irq, pdev);
+err_irq:
+ if (gpio_is_valid(pdata->gpio_pullup))
+ gpio_free(pdata->gpio_pullup);
+ gpio_free(pdata->gpio_vbus);
+err_gpio:
+ platform_set_drvdata(pdev, NULL);
+ kfree(gpio_vbus);
+ return err;
+}
+
+static int __exit gpio_vbus_remove(struct platform_device *pdev)
+{
+ struct gpio_vbus_data *gpio_vbus = platform_get_drvdata(pdev);
+ struct gpio_vbus_mach_info *pdata = pdev->dev.platform_data;
+ int gpio = pdata->gpio_vbus;
+
+ regulator_put(gpio_vbus->vbus_draw);
+
+ otg_set_transceiver(NULL);
+
+ free_irq(gpio_to_irq(gpio), pdev);
+ if (gpio_is_valid(pdata->gpio_pullup))
+ gpio_free(pdata->gpio_pullup);
+ gpio_free(gpio);
+ platform_set_drvdata(pdev, NULL);
+ kfree(gpio_vbus);
+
+ return 0;
+}
+
+/* NOTE: the gpio-vbus device may *NOT* be hotplugged */
+
+MODULE_ALIAS("platform:gpio-vbus");
+
+static struct platform_driver gpio_vbus_driver = {
+ .driver = {
+ .name = "gpio-vbus",
+ .owner = THIS_MODULE,
+ },
+ .remove = __exit_p(gpio_vbus_remove),
+};
+
+static int __init gpio_vbus_init(void)
+{
+ return platform_driver_probe(&gpio_vbus_driver, gpio_vbus_probe);
+}
+module_init(gpio_vbus_init);
+
+static void __exit gpio_vbus_exit(void)
+{
+ platform_driver_unregister(&gpio_vbus_driver);
+}
+module_exit(gpio_vbus_exit);
+
+MODULE_DESCRIPTION("simple GPIO controlled OTG transceiver driver");
+MODULE_AUTHOR("Philipp Zabel");
+MODULE_LICENSE("GPL");
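
For context, a board file would hand this driver its GPIOs through platform data and a "gpio-vbus" platform device. The fragment below is a hedged sketch, not part of the patch: the GPIO numbers are made up, and the field names simply mirror the pdata accesses in the driver above (gpio_vbus, gpio_pullup and their _inverted flags, declared in <linux/usb/gpio_vbus.h>).

#include <linux/platform_device.h>
#include <linux/usb/gpio_vbus.h>

static struct gpio_vbus_mach_info example_vbus_info = {
	.gpio_vbus		= 41,	/* made-up VBUS sense GPIO */
	.gpio_pullup		= 45,	/* made-up D+ pullup GPIO */
	.gpio_vbus_inverted	= 0,
	.gpio_pullup_inverted	= 0,
};

static struct platform_device example_gpio_vbus_device = {
	.name	= "gpio-vbus",		/* must match the driver name */
	.id	= -1,
	.dev	= {
		.platform_data = &example_vbus_info,
	},
};

/* Board init would then call platform_device_register(&example_gpio_vbus_device)
 * before the UDC driver probes, per the "needs to be loaded first" note above. */
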
diff --git a/drivers/i2c/chips/isp1301_omap.c b/drivers/usb/otg/isp1301_omap.c
similarity index 100%
rename from drivers/i2c/chips/isp1301_omap.c
rename to drivers/usb/otg/isp1301_omap.c
diff --git a/drivers/usb/otg/otg.c b/drivers/usb/otg/otg.c
new file mode 100644
index 00000000000..ff318fae7d4
--- /dev/null
+++ b/drivers/usb/otg/otg.c
@@ -0,0 +1,65 @@
+/*
+ * otg.c -- USB OTG utility code
+ *
+ * Copyright (C) 2004 Texas Instruments
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include
+#include
+
+#include
+
+static struct otg_transceiver *xceiv;
+
+/**
+ * otg_get_transceiver - find the (single) OTG transceiver
+ *
+ * Returns the transceiver driver, after getting a refcount to it; or
+ * null if there is no such transceiver. The caller is responsible for
+ * calling otg_put_transceiver() to release that count.
+ *
+ * For use by USB host and peripheral drivers.
+ */
+struct otg_transceiver *otg_get_transceiver(void)
+{
+ if (xceiv)
+ get_device(xceiv->dev);
+ return xceiv;
+}
+EXPORT_SYMBOL(otg_get_transceiver);
+
+/**
+ * otg_put_transceiver - release the (single) OTG transceiver
+ * @x: the transceiver returned by otg_get_transceiver()
+ *
+ * Releases a refcount the caller received from otg_get_transceiver().
+ *
+ * For use by USB host and peripheral drivers.
+ */
+void otg_put_transceiver(struct otg_transceiver *x)
+{
+ put_device(x->dev);
+}
+EXPORT_SYMBOL(otg_put_transceiver);
+
+/**
+ * otg_set_transceiver - declare the (single) OTG transceiver
+ * @x: the USB OTG transceiver to be used; or NULL
+ *
+ * This call is exclusively for use by transceiver drivers, which
+ * coordinate the activities of drivers for host and peripheral
+ * controllers, and in some cases for VBUS current regulation.
+ */
+int otg_set_transceiver(struct otg_transceiver *x)
+{
+ if (xceiv && x)
+ return -EBUSY;
+ xceiv = x;
+ return 0;
+}
+EXPORT_SYMBOL(otg_set_transceiver);
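
The intended calling pattern for this single-transceiver API, as a gadget (UDC) or host controller driver might use it, looks roughly like the sketch below. example_bind()/example_unbind() and the static pointer are invented names; otg_get_transceiver(), otg_put_transceiver() and otg_set_peripheral() are the real calls (the last is also used by the omap2430 glue earlier in this patch).

#include <linux/errno.h>
#include <linux/usb/gadget.h>
#include <linux/usb/otg.h>

static struct otg_transceiver *example_xceiv;

static int example_bind(struct usb_gadget *gadget)
{
	example_xceiv = otg_get_transceiver();	/* takes a refcount on the xceiv */
	if (!example_xceiv)
		return -ENODEV;			/* no transceiver registered yet */
	return otg_set_peripheral(example_xceiv, gadget);
}

static void example_unbind(void)
{
	if (example_xceiv) {
		otg_set_peripheral(example_xceiv, NULL);
		otg_put_transceiver(example_xceiv);	/* drop the refcount */
		example_xceiv = NULL;
	}
}
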
diff --git a/drivers/usb/otg/twl4030-usb.c b/drivers/usb/otg/twl4030-usb.c
new file mode 100644
index 00000000000..416e4410be0
--- /dev/null
+++ b/drivers/usb/otg/twl4030-usb.c
@@ -0,0 +1,721 @@
+/*
+ * twl4030_usb - TWL4030 USB transceiver, talking to OMAP OTG controller
+ *
+ * Copyright (C) 2004-2007 Texas Instruments
+ * Copyright (C) 2008 Nokia Corporation
+ * Contact: Felipe Balbi
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Current status:
+ * - HS USB ULPI mode works.
+ * - 3-pin mode support may be added in future.
+ */
+
+#include
+#include
+#include
+#include