Merge branch 'master'

Commit c6a756795d
330 changed files with 6810 additions and 2000 deletions
CREDITS (9 changed lines)
@@ -3241,14 +3241,9 @@ S: 12725 SW Millikan Way, Suite 400
S: Beaverton, Oregon 97005
S: USA

N: Marcelo W. Tosatti
E: marcelo.tosatti@cyclades.com
D: Miscellaneous kernel hacker
N: Marcelo Tosatti
E: marcelo@kvack.org
D: v2.4 kernel maintainer
D: Current pc300/cyclades maintainer
S: Cyclades Corporation
S: Av Cristovao Colombo, 462. Floresta.
S: Porto Alegre
S: Brazil

N: Stefan Traby

@@ -1721,11 +1721,6 @@ Your cooperation is appreciated.
		These devices support the same API as the generic SCSI
		devices.

 97 block	Packet writing for CD/DVD devices
		  0 = /dev/pktcdvd0	First packet-writing module
		  1 = /dev/pktcdvd1	Second packet-writing module
		    ...

 98 char	Control and Measurement Device (comedi)
		  0 = /dev/comedi0	First comedi device
		  1 = /dev/comedi1	Second comedi device

@@ -259,9 +259,9 @@ sub dibusb {
}

sub nxt2002 {
	my $sourcefile = "Broadband4PC_4_2_11.zip";
	my $sourcefile = "Technisat_DVB-PC_4_4_COMPACT.zip";
	my $url = "http://www.bbti.us/download/windows/$sourcefile";
	my $hash = "c6d2ea47a8f456d887ada0cfb718ff2a";
	my $hash = "476befae8c7c1bb9648954060b1eec1f";
	my $outfile = "dvb-fe-nxt2002.fw";
	my $tmpdir = tempdir(DIR => "/tmp", CLEANUP => 1);

@@ -269,8 +269,8 @@ sub nxt2002 {

	wgetfile($sourcefile, $url);
	unzip($sourcefile, $tmpdir);
	verify("$tmpdir/SkyNETU.sys", $hash);
	extract("$tmpdir/SkyNETU.sys", 375832, 5908, $outfile);
	verify("$tmpdir/SkyNET.sys", $hash);
	extract("$tmpdir/SkyNET.sys", 331624, 5908, $outfile);

	$outfile;
}

@@ -57,6 +57,15 @@ Who: Jody McIntyre <scjody@steamballoon.com>

---------------------------

What:	sbp2: module parameter "force_inquiry_hack"
When:	July 2006
Why:	Superseded by parameter "workarounds". Both parameters are meant to be
	used ad-hoc and for single devices only, i.e. not in modprobe.conf,
	therefore the impact of this feature replacement should be low.
Who:	Stefan Richter <stefanr@s5r6.in-berlin.de>

---------------------------

What:	Video4Linux API 1 ioctls and video_decoder.h from Video devices.
When:	July 2006
Why:	V4L1 API was replaced by the V4L2 API. During migration from 2.4 to 2.6

@@ -105,20 +105,3 @@
on the setup, so I think that the choice on what firmware to make
persistent should be left to userspace.

- Why register_firmware()+__init can be useful:
  - For boot devices needing firmware.
  - To make the transition easier:
    The firmware can be declared __init and register_firmware()
    called on module_init. Then the firmware is warranted to be
    there even if "firmware hotplug userspace" is not there yet or
    it doesn't yet provide the needed firmware.
    Once the firmware is widely available in userspace, it can be
    removed from the kernel. Or made optional (CONFIG_.*_FIRMWARE).

In either case, if firmware hotplug support is there, it can move the
firmware out of kernel memory into the real filesystem for later
usage.

Note: If persistence is implemented on top of initramfs,
register_firmware() may not be appropriate.

@@ -5,8 +5,6 @@
 *
 * Sample code on how to use request_firmware() from drivers.
 *
 * Note that register_firmware() is currently useless.
 *
 */

#include <linux/module.h>

@@ -17,11 +15,6 @@

#include "linux/firmware.h"

#define WE_CAN_NEED_FIRMWARE_BEFORE_USERSPACE_IS_AVAILABLE
#ifdef WE_CAN_NEED_FIRMWARE_BEFORE_USERSPACE_IS_AVAILABLE
char __init inkernel_firmware[] = "let's say that this is firmware\n";
#endif

static struct device ghost_device = {
	.bus_id = "ghost0",
};

@@ -104,10 +97,6 @@ static void sample_probe_async(void)

static int sample_init(void)
{
#ifdef WE_CAN_NEED_FIRMWARE_BEFORE_USERSPACE_IS_AVAILABLE
	register_firmware("sample_driver_fw", inkernel_firmware,
			  sizeof(inkernel_firmware));
#endif
	device_initialize(&ghost_device);
	/* since there is no real hardware insertion I just call the
	 * sample probe functions here */

@@ -1031,7 +1031,7 @@ conflict on any particular lock.
LOCKS VS MEMORY ACCESSES
------------------------

Consider the following: the system has a pair of spinlocks (N) and (Q), and
Consider the following: the system has a pair of spinlocks (M) and (Q), and
three CPUs; then should the following sequence of events occur:

	CPU 1				CPU 2

@@ -1678,7 +1678,7 @@ CPU's caches by some other cache event:
			smp_wmb();
	<A:modify v=2>	<C:busy>
			<C:queue v=2>
	p = &b;		q = p;
	p = &v;		q = p;
			<D:request p>
	<B:modify p=&v>	<D:commit p=&v>
			<D:read p>

Documentation/spi/pxa2xx (new file, 234 lines)
@@ -0,0 +1,234 @@
PXA2xx SPI on SSP driver HOWTO
===================================================
This is a mini HOWTO on the pxa2xx_spi driver. The driver turns a PXA2xx
synchronous serial port into a SPI master controller
(see Documentation/spi/spi_summary). The driver has the following features

- Support for any PXA2xx SSP
- SSP PIO and SSP DMA data transfers.
- External and Internal (SSPFRM) chip selects.
- Per slave device (chip) configuration.
- Full suspend, freeze, resume support.

The driver is built around a "spi_message" fifo serviced by a workqueue and a
tasklet. The workqueue, "pump_messages", drives the message fifo and the tasklet
(pump_transfer) is responsible for queuing SPI transactions and setting up and
launching the dma/interrupt driven transfers.

Declaring PXA2xx Master Controllers
-----------------------------------
Typically a SPI master is defined in the arch/.../mach-*/board-*.c as a
"platform device". The master configuration is passed to the driver via a table
found in include/asm-arm/arch-pxa/pxa2xx_spi.h:

struct pxa2xx_spi_master {
	enum pxa_ssp_type ssp_type;
	u32 clock_enable;
	u16 num_chipselect;
	u8 enable_dma;
};

The "pxa2xx_spi_master.ssp_type" field must have a value between 1 and 3 and
informs the driver which features a particular SSP supports.

The "pxa2xx_spi_master.clock_enable" field is used to enable/disable the
corresponding SSP peripheral block in the "Clock Enable Register (CKEN)". See
the "PXA2xx Developer Manual" section "Clocks and Power Management".

The "pxa2xx_spi_master.num_chipselect" field is used to determine the number of
slave devices (chips) attached to this SPI master.

The "pxa2xx_spi_master.enable_dma" field informs the driver that SSP DMA should
be used. This causes the driver to acquire two DMA channels: rx_channel and
tx_channel. The rx_channel has a higher DMA service priority than the tx_channel.
See the "PXA2xx Developer Manual" section "DMA Controller".

NSSP MASTER SAMPLE
------------------
Below is a sample configuration using the PXA255 NSSP.

static struct resource pxa_spi_nssp_resources[] = {
	[0] = {
		.start	= __PREG(SSCR0_P(2)), /* Start address of NSSP */
		.end	= __PREG(SSCR0_P(2)) + 0x2c, /* Range of registers */
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= IRQ_NSSP, /* NSSP IRQ */
		.end	= IRQ_NSSP,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct pxa2xx_spi_master pxa_nssp_master_info = {
	.ssp_type = PXA25x_NSSP, /* Type of SSP */
	.clock_enable = CKEN9_NSSP, /* NSSP Peripheral clock */
	.num_chipselect = 1, /* Matches the number of chips attached to NSSP */
	.enable_dma = 1, /* Enables NSSP DMA */
};

static struct platform_device pxa_spi_nssp = {
	.name = "pxa2xx-spi", /* MUST BE THIS VALUE, so device match driver */
	.id = 2, /* Bus number, MUST MATCH SSP number 1..n */
	.resource = pxa_spi_nssp_resources,
	.num_resources = ARRAY_SIZE(pxa_spi_nssp_resources),
	.dev = {
		.platform_data = &pxa_nssp_master_info, /* Passed to driver */
	},
};

static struct platform_device *devices[] __initdata = {
	&pxa_spi_nssp,
};

static void __init board_init(void)
{
	(void)platform_add_devices(devices, ARRAY_SIZE(devices));
}

Declaring Slave Devices
-----------------------
Typically each SPI slave (chip) is defined in the arch/.../mach-*/board-*.c
using the "spi_board_info" structure found in "linux/spi/spi.h". See
"Documentation/spi/spi_summary" for additional information.

Each slave device attached to the PXA must provide slave specific configuration
information via the structure "pxa2xx_spi_chip" found in
"include/asm-arm/arch-pxa/pxa2xx_spi.h". The pxa2xx_spi master controller driver
will use the configuration whenever the driver communicates with the slave
device.

struct pxa2xx_spi_chip {
	u8 tx_threshold;
	u8 rx_threshold;
	u8 dma_burst_size;
	u32 timeout_microsecs;
	u8 enable_loopback;
	void (*cs_control)(u32 command);
};

The "pxa2xx_spi_chip.tx_threshold" and "pxa2xx_spi_chip.rx_threshold" fields are
used to configure the SSP hardware fifo. These fields are critical to the
performance of the pxa2xx_spi driver and misconfiguration will result in rx
fifo overruns (especially in PIO mode transfers). Good default values are

	.tx_threshold = 12,
	.rx_threshold = 4,

The "pxa2xx_spi_chip.dma_burst_size" field is used to configure the PXA2xx DMA
engine and is related to the "spi_device.bits_per_word" field. Read and understand
the PXA2xx "Developer Manual" sections on the DMA controller and SSP Controllers
to determine the correct value. An SSP configured for byte-wide transfers would
use a value of 8.

The "pxa2xx_spi_chip.timeout_microsecs" field is used to efficiently handle
trailing bytes in the SSP receiver fifo. The correct value for this field is
dependent on the SPI bus speed ("spi_board_info.max_speed_hz") and the specific
slave device. Please note that the PXA2xx SSP 1 does not support trailing byte
timeouts and must busy-wait any trailing bytes.

The "pxa2xx_spi_chip.enable_loopback" field is used to place the SSP port
into internal loopback mode. In this mode the SSP controller internally
connects the SSPTX pin to the SSPRX pin. This is useful for initial setup
testing.

The "pxa2xx_spi_chip.cs_control" field is used to point to a board specific
function for asserting/deasserting a slave device chip select. If the field is
NULL, the pxa2xx_spi master controller driver assumes that the SSP port is
configured to use SSPFRM instead.

NSSP SLAVE SAMPLE
-----------------
The pxa2xx_spi_chip structure is passed to the pxa2xx_spi driver in the
"spi_board_info.controller_data" field. Below is a sample configuration using
the PXA255 NSSP.

/* Chip Select control for the CS8415A SPI slave device */
static void cs8415a_cs_control(u32 command)
{
	if (command & PXA2XX_CS_ASSERT)
		GPCR(2) = GPIO_bit(2);
	else
		GPSR(2) = GPIO_bit(2);
}

/* Chip Select control for the CS8405A SPI slave device */
static void cs8405a_cs_control(u32 command)
{
	if (command & PXA2XX_CS_ASSERT)
		GPCR(3) = GPIO_bit(3);
	else
		GPSR(3) = GPIO_bit(3);
}

static struct pxa2xx_spi_chip cs8415a_chip_info = {
	.tx_threshold = 12, /* SSP hardware FIFO threshold */
	.rx_threshold = 4, /* SSP hardware FIFO threshold */
	.dma_burst_size = 8, /* Byte wide transfers used so 8 byte bursts */
	.timeout_microsecs = 64, /* Wait at least 64usec to handle trailing */
	.cs_control = cs8415a_cs_control, /* Use external chip select */
};

static struct pxa2xx_spi_chip cs8405a_chip_info = {
	.tx_threshold = 12, /* SSP hardware FIFO threshold */
	.rx_threshold = 4, /* SSP hardware FIFO threshold */
	.dma_burst_size = 8, /* Byte wide transfers used so 8 byte bursts */
	.timeout_microsecs = 64, /* Wait at least 64usec to handle trailing */
	.cs_control = cs8405a_cs_control, /* Use external chip select */
};

static struct spi_board_info streetracer_spi_board_info[] __initdata = {
	{
		.modalias = "cs8415a", /* Name of spi_driver for this device */
		.max_speed_hz = 3686400, /* Run SSP as fast as possible */
		.bus_num = 2, /* Framework bus number */
		.chip_select = 0, /* Framework chip select */
		.platform_data = NULL, /* No spi_driver specific config */
		.controller_data = &cs8415a_chip_info, /* Master chip config */
		.irq = STREETRACER_APCI_IRQ, /* Slave device interrupt */
	},
	{
		.modalias = "cs8405a", /* Name of spi_driver for this device */
		.max_speed_hz = 3686400, /* Run SSP as fast as possible */
		.bus_num = 2, /* Framework bus number */
		.chip_select = 1, /* Framework chip select */
		.controller_data = &cs8405a_chip_info, /* Master chip config */
		.irq = STREETRACER_APCI_IRQ, /* Slave device interrupt */
	},
};

static void __init streetracer_init(void)
{
	spi_register_board_info(streetracer_spi_board_info,
				ARRAY_SIZE(streetracer_spi_board_info));
}

DMA and PIO I/O Support
-----------------------
The pxa2xx_spi driver supports both DMA and interrupt driven PIO message
transfers. The driver defaults to PIO mode and DMA transfers must be enabled by
setting the "enable_dma" flag in the "pxa2xx_spi_master" structure and
ensuring that the "pxa2xx_spi_chip.dma_burst_size" field is non-zero. The DMA
mode supports both coherent and stream based DMA mappings.

The following logic is used to determine the type of I/O to be used on
a per "spi_transfer" basis:

	if !enable_dma or dma_burst_size == 0 then
		always use PIO transfers

	if spi_message.is_dma_mapped and rx_dma_buf != 0 and tx_dma_buf != 0 then
		use coherent DMA mode

	if rx_buf and tx_buf are aligned on 8 byte boundary then
		use streaming DMA mode

	otherwise
		use PIO transfer

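As a rough companion to that pseudocode, the same decision could be expressed in C
roughly as below. This is only an illustrative sketch: the io_mode enum, the
pick_io_mode() function and the alignment test are invented for the example and only
mirror the rules above, they are not the driver's actual internals.

#include <linux/spi/spi.h>
#include <asm/arch/pxa2xx_spi.h>	/* include/asm-arm/arch-pxa/pxa2xx_spi.h */

enum io_mode { IO_PIO, IO_DMA_COHERENT, IO_DMA_STREAMING };

/* Hypothetical sketch of the per-spi_transfer I/O mode decision. */
static enum io_mode pick_io_mode(const struct pxa2xx_spi_master *master_info,
				 const struct pxa2xx_spi_chip *chip_info,
				 const struct spi_message *msg,
				 const struct spi_transfer *xfer)
{
	if (!master_info->enable_dma || chip_info->dma_burst_size == 0)
		return IO_PIO;			/* DMA not configured at all */

	if (msg->is_dma_mapped && xfer->rx_dma && xfer->tx_dma)
		return IO_DMA_COHERENT;		/* caller supplied DMA addresses */

	if (((unsigned long)xfer->rx_buf & 7) == 0 &&
	    ((unsigned long)xfer->tx_buf & 7) == 0)
		return IO_DMA_STREAMING;	/* driver maps the buffers itself */

	return IO_PIO;				/* fall back to PIO */
}
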
THANKS TO
---------

David Brownell and others for mentoring the development of this driver.

@@ -414,7 +414,33 @@ to get the driver-private data allocated for that device.
The driver will initialize the fields of that spi_master, including the
bus number (maybe the same as the platform device ID) and three methods
used to interact with the SPI core and SPI protocol drivers. It will
also initialize its own internal state.
also initialize its own internal state. (See below about bus numbering
and those methods.)

After you initialize the spi_master, then use spi_register_master() to
publish it to the rest of the system. At that time, device nodes for
the controller and any predeclared spi devices will be made available,
and the driver model core will take care of binding them to drivers.

If you need to remove your SPI controller driver, spi_unregister_master()
will reverse the effect of spi_register_master().
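As a rough illustration of that allocate/initialize/register flow, here is a minimal
sketch of a controller probe() routine. The sample_ prefixed names and the
driver-private struct are made up for the example; only spi_alloc_master(),
spi_master_get_devdata(), spi_register_master() and spi_master_put() are the real
core calls described in this document.

#include <linux/platform_device.h>
#include <linux/spi/spi.h>

struct sample_spi_ctlr {		/* hypothetical driver-private state */
	void __iomem *regs;
	/* ... queue, locks, etc. ... */
};

/* setup() and transfer() methods assumed to be implemented elsewhere. */
extern int sample_spi_setup(struct spi_device *spi);
extern int sample_spi_transfer(struct spi_device *spi, struct spi_message *msg);

static int sample_spi_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	struct sample_spi_ctlr *ctlr;
	int status;

	/* driver-private data is allocated along with the spi_master */
	master = spi_alloc_master(&pdev->dev, sizeof(*ctlr));
	if (!master)
		return -ENOMEM;
	ctlr = spi_master_get_devdata(master);

	/* bus number taken from the platform device id, as suggested above */
	master->bus_num = pdev->id;
	master->num_chipselect = 1;
	master->setup = sample_spi_setup;
	master->transfer = sample_spi_transfer;

	/* publish the controller; predeclared spi_board_info devices bind now */
	status = spi_register_master(master);
	if (status < 0)
		spi_master_put(master);
	return status;
}
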

BUS NUMBERING

Bus numbering is important, since that's how Linux identifies a given
SPI bus (shared SCK, MOSI, MISO). Valid bus numbers start at zero. On
SOC systems, the bus numbers should match the numbers defined by the chip
manufacturer. For example, hardware controller SPI2 would be bus number 2,
and spi_board_info for devices connected to it would use that number.

If you don't have such a hardware-assigned bus number, and for some reason
you can't just assign them, then provide a negative bus number. That will
then be replaced by a dynamically assigned number. You'd then need to treat
this as a non-static configuration (see above).


SPI MASTER METHODS

master->setup(struct spi_device *spi)
	This sets up the device clock rate, SPI mode, and word sizes.

@@ -431,6 +457,9 @@ also initialize its own internal state.
	state it dynamically associates with that device. If you do that,
	be sure to provide the cleanup() method to free that state.


SPI MESSAGE QUEUE

The bulk of the driver will be managing the I/O queue fed by transfer().

That queue could be purely conceptual. For example, a driver used only

@@ -440,6 +469,9 @@ But the queue will probably be very real, using message->queue, PIO,
often DMA (especially if the root filesystem is in SPI flash), and
execution contexts like IRQ handlers, tasklets, or workqueues (such
as keventd). Your driver can be as fancy, or as simple, as you need.
Such a transfer() method would normally just add the message to a
queue, and then start some asynchronous transfer engine (unless it's
already running).
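To make that concrete, here is a minimal, hypothetical sketch of such a transfer()
method. The sample_ctlr struct and its queue, lock and workqueue fields are invented
for the example and are not part of the SPI core API; only the spi_message fields and
the core helpers used below are real.

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/spi/spi.h>
#include <linux/workqueue.h>

struct sample_ctlr {			/* hypothetical driver-private state */
	spinlock_t		lock;
	struct list_head	queue;	/* of struct spi_message */
	struct workqueue_struct	*wq;
	struct work_struct	pump;	/* dequeues and runs messages */
};

static int sample_transfer(struct spi_device *spi, struct spi_message *msg)
{
	struct sample_ctlr *ctlr = spi_master_get_devdata(spi->master);
	unsigned long flags;

	msg->status = -EINPROGRESS;
	msg->actual_length = 0;

	/* just add the message to the queue ... */
	spin_lock_irqsave(&ctlr->lock, flags);
	list_add_tail(&msg->queue, &ctlr->queue);
	spin_unlock_irqrestore(&ctlr->lock, flags);

	/* ... and kick the asynchronous engine; a no-op if it is already running */
	queue_work(ctlr->wq, &ctlr->pump);
	return 0;
}
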

THANKS TO

@@ -36,6 +36,9 @@ timeout or margin. The simplest way to ping the watchdog is to write
some data to the device. So a very simple watchdog daemon would look
like this:

#include <stdlib.h>
#include <fcntl.h>

int main(int argc, const char *argv[]) {
	int fd=open("/dev/watchdog",O_WRONLY);
	if (fd==-1) {

MAINTAINERS (42 changed lines)
@@ -40,11 +40,20 @@ trivial patch so apply some common sense.
	PLEASE document known bugs. If it doesn't work for everything
	or does something very odd once a month document it.

	PLEASE remember that submissions must be made under the terms
	of the OSDL certificate of contribution
	(http://www.osdl.org/newsroom/press_releases/2004/2004_05_24_dco.html)
	and should include a Signed-off-by: line.

6.	Make sure you have the right to send any changes you make. If you
	do changes at work you may find your employer owns the patch
	not you.

7.	Happy hacking.
7.	When sending security related changes or reports to a maintainer
	please Cc: security@kernel.org, especially if the maintainer
	does not respond.

8.	Happy hacking.

-----------------------------------

@@ -979,7 +988,7 @@ S: Maintained
EXT3 FILE SYSTEM
P:	Stephen Tweedie, Andrew Morton
M:	sct@redhat.com, akpm@osdl.org, adilger@clusterfs.com
L:	ext3-users@redhat.com
L:	ext2-devel@lists.sourceforge.net
S:	Maintained

F71805F HARDWARE MONITORING DRIVER

@@ -1548,12 +1557,28 @@ W: http://jfs.sourceforge.net/
T:	git kernel.org:/pub/scm/linux/kernel/git/shaggy/jfs-2.6.git
S:	Supported

JOURNALLING LAYER FOR BLOCK DEVICES (JBD)
P:	Stephen Tweedie, Andrew Morton
M:	sct@redhat.com, akpm@osdl.org
L:	ext2-devel@lists.sourceforge.net
S:	Maintained

KCONFIG
P:	Roman Zippel
M:	zippel@linux-m68k.org
L:	kbuild-devel@lists.sourceforge.net
S:	Maintained

KDUMP
P:	Vivek Goyal
M:	vgoyal@in.ibm.com
P:	Haren Myneni
M:	hbabu@us.ibm.com
L:	fastboot@lists.osdl.org
L:	linux-kernel@vger.kernel.org
W:	http://lse.sourceforge.net/kdump/
S:	Maintained

KERNEL AUTOMOUNTER (AUTOFS)
P:	H. Peter Anvin
M:	hpa@zytor.com

@@ -1621,6 +1646,11 @@ M: James.Bottomley@HansenPartnership.com
L:	linux-scsi@vger.kernel.org
S:	Maintained

LED SUBSYSTEM
P:	Richard Purdie
M:	rpurdie@rpsys.net
S:	Maintained

LEGO USB Tower driver
P:	Juergen Stuber
M:	starblue@users.sourceforge.net

@@ -1680,7 +1710,7 @@ S: Maintained

LINUX FOR POWERPC EMBEDDED PPC8XX
P:	Marcelo Tosatti
M:	marcelo.tosatti@cyclades.com
M:	marcelo@kvack.org
W:	http://www.penguinppc.org/
L:	linuxppc-embedded@ozlabs.org
S:	Maintained

@@ -2531,6 +2561,12 @@ M: perex@suse.cz
L:	alsa-devel@alsa-project.org
S:	Maintained

SPI SUBSYSTEM
P:	David Brownell
M:	dbrownell@users.sourceforge.net
L:	spi-devel-general@lists.sourceforge.net
S:	Maintained

TPM DEVICE DRIVER
P:	Kylene Hall
M:	kjhall@us.ibm.com

Makefile (4 changed lines)
@@ -1,8 +1,8 @@
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 17
EXTRAVERSION =-rc4
NAME=Sliding Snow Leopard
EXTRAVERSION =-rc5
NAME=Lordi Rules

# *DOCUMENTATION*
# To see a list of typical targets execute "make help"

@ -99,6 +99,8 @@ int main(void)
|
|||
DEFINE(MACHINFO_NAME, offsetof(struct machine_desc, name));
|
||||
DEFINE(MACHINFO_PHYSIO, offsetof(struct machine_desc, phys_io));
|
||||
DEFINE(MACHINFO_PGOFFIO, offsetof(struct machine_desc, io_pg_offst));
|
||||
BLANK();
|
||||
DEFINE(PROC_INFO_SZ, sizeof(struct proc_info_list));
|
||||
DEFINE(PROCINFO_INITFUNC, offsetof(struct proc_info_list, __cpu_flush));
|
||||
DEFINE(PROCINFO_MMUFLAGS, offsetof(struct proc_info_list, __cpu_mmu_flags));
|
||||
return 0;
|
||||
|
|
|
@ -143,12 +143,23 @@ static struct dma_ops isa_dma_ops = {
|
|||
.residue = isa_get_dma_residue,
|
||||
};
|
||||
|
||||
static struct resource dma_resources[] = {
|
||||
{ "dma1", 0x0000, 0x000f },
|
||||
{ "dma low page", 0x0080, 0x008f },
|
||||
{ "dma2", 0x00c0, 0x00df },
|
||||
{ "dma high page", 0x0480, 0x048f }
|
||||
};
|
||||
static struct resource dma_resources[] = { {
|
||||
.name = "dma1",
|
||||
.start = 0x0000,
|
||||
.end = 0x000f
|
||||
}, {
|
||||
.name = "dma low page",
|
||||
.start = 0x0080,
|
||||
.end = 0x008f
|
||||
}, {
|
||||
.name = "dma2",
|
||||
.start = 0x00c0,
|
||||
.end = 0x00df
|
||||
}, {
|
||||
.name = "dma high page",
|
||||
.start = 0x0480,
|
||||
.end = 0x048f
|
||||
} };
|
||||
|
||||
void __init isa_init_dma(dma_t *dma)
|
||||
{
|
||||
|
|
|
@ -311,7 +311,7 @@ void free_thread_info(struct thread_info *thread)
|
|||
struct thread_info_list *th = &get_cpu_var(thread_info_list);
|
||||
if (th->nr < EXTRA_TASK_STRUCT) {
|
||||
unsigned long *p = (unsigned long *)thread;
|
||||
p[0] = th->head;
|
||||
p[0] = (unsigned long)th->head;
|
||||
th->head = p;
|
||||
th->nr += 1;
|
||||
put_cpu_var(thread_info_list);
|
||||
|
|
|
@ -122,7 +122,7 @@ ENTRY(c_backtrace)
|
|||
#define reg r5
|
||||
#define stack r6
|
||||
|
||||
.Ldumpstm: stmfd sp!, {instr, reg, stack, r7, lr}
|
||||
.Ldumpstm: stmfd sp!, {instr, reg, stack, r7, r8, lr}
|
||||
mov stack, r0
|
||||
mov instr, r1
|
||||
mov reg, #9
|
||||
|
@ -145,7 +145,7 @@ ENTRY(c_backtrace)
|
|||
adrne r0, .Lcr
|
||||
blne printk
|
||||
mov r0, stack
|
||||
LOADREGS(fd, sp!, {instr, reg, stack, r7, pc})
|
||||
LOADREGS(fd, sp!, {instr, reg, stack, r7, r8, pc})
|
||||
|
||||
.Lfp: .asciz " r%d = %08X%c"
|
||||
.Lcr: .asciz "\n"
|
||||
|
|
|
@ -189,12 +189,12 @@ ENTRY(__do_div64)
|
|||
moveq pc, lr
|
||||
|
||||
@ Division by 0:
|
||||
str lr, [sp, #-4]!
|
||||
str lr, [sp, #-8]!
|
||||
bl __div0
|
||||
|
||||
@ as wrong as it could be...
|
||||
mov yl, #0
|
||||
mov yh, #0
|
||||
mov xh, #0
|
||||
ldr pc, [sp], #4
|
||||
ldr pc, [sp], #8
|
||||
|
||||
|
|
|
@ -95,7 +95,10 @@ static void __init mainstone_init_irq(void)
|
|||
for(irq = MAINSTONE_IRQ(0); irq <= MAINSTONE_IRQ(15); irq++) {
|
||||
set_irq_chip(irq, &mainstone_irq_chip);
|
||||
set_irq_handler(irq, do_level_IRQ);
|
||||
set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
|
||||
if (irq == MAINSTONE_IRQ(10) || irq == MAINSTONE_IRQ(14))
|
||||
set_irq_flags(irq, IRQF_VALID | IRQF_PROBE | IRQF_NOAUTOEN);
|
||||
else
|
||||
set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
|
||||
}
|
||||
set_irq_flags(MAINSTONE_IRQ(8), 0);
|
||||
set_irq_flags(MAINSTONE_IRQ(12), 0);
|
||||
|
|
|
@ -137,8 +137,11 @@ static struct amba_device *amba_devs[] __initdata = {
|
|||
static void __init gic_init_irq(void)
|
||||
{
|
||||
#ifdef CONFIG_REALVIEW_MPCORE
|
||||
unsigned int pldctrl;
|
||||
writel(0x0000a05f, __io_address(REALVIEW_SYS_LOCK));
|
||||
writel(0x008003c0, __io_address(REALVIEW_SYS_BASE) + 0xd8);
|
||||
pldctrl = readl(__io_address(REALVIEW_SYS_BASE) + 0xd8);
|
||||
pldctrl |= 0x00800000; /* New irq mode */
|
||||
writel(pldctrl, __io_address(REALVIEW_SYS_BASE) + 0xd8);
|
||||
writel(0x00000000, __io_address(REALVIEW_SYS_LOCK));
|
||||
#endif
|
||||
gic_dist_init(__io_address(REALVIEW_GIC_DIST_BASE));
|
||||
|
|
|
@ -59,8 +59,7 @@ ENTRY(s3c2410_cpu_suspend)
|
|||
mrc p15, 0, r5, c13, c0, 0 @ PID
|
||||
mrc p15, 0, r6, c3, c0, 0 @ Domain ID
|
||||
mrc p15, 0, r7, c2, c0, 0 @ translation table base address
|
||||
mrc p15, 0, r8, c2, c0, 0 @ auxiliary control register
|
||||
mrc p15, 0, r9, c1, c0, 0 @ control register
|
||||
mrc p15, 0, r8, c1, c0, 0 @ control register
|
||||
|
||||
stmia r0, { r4 - r13 }
|
||||
|
||||
|
@ -165,7 +164,6 @@ ENTRY(s3c2410_cpu_resume)
|
|||
mcr p15, 0, r5, c13, c0, 0 @ PID
|
||||
mcr p15, 0, r6, c3, c0, 0 @ Domain ID
|
||||
mcr p15, 0, r7, c2, c0, 0 @ translation table base
|
||||
mcr p15, 0, r8, c1, c1, 0 @ auxilliary control
|
||||
|
||||
#ifdef CONFIG_DEBUG_RESUME
|
||||
mov r3, #'R'
|
||||
|
@ -173,7 +171,7 @@ ENTRY(s3c2410_cpu_resume)
|
|||
#endif
|
||||
|
||||
ldr r2, =resume_with_mmu
|
||||
mcr p15, 0, r9, c1, c0, 0 @ turn on MMU, etc
|
||||
mcr p15, 0, r8, c1, c0, 0 @ turn on MMU, etc
|
||||
nop @ second-to-last before mmu
|
||||
mov pc, r2 @ go back to virtual address
|
||||
|
||||
|
|
|
@ -141,7 +141,7 @@ __ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
|
|||
return NULL;
|
||||
addr = (unsigned long)area->addr;
|
||||
if (remap_area_pages(addr, pfn, size, flags)) {
|
||||
vfree((void *)addr);
|
||||
vunmap((void *)addr);
|
||||
return NULL;
|
||||
}
|
||||
return (void __iomem *) (offset + (char *)addr);
|
||||
|
@ -173,7 +173,7 @@ EXPORT_SYMBOL(__ioremap);
|
|||
|
||||
void __iounmap(void __iomem *addr)
|
||||
{
|
||||
vfree((void *) (PAGE_MASK & (unsigned long) addr));
|
||||
vunmap((void *)(PAGE_MASK & (unsigned long)addr));
|
||||
}
|
||||
EXPORT_SYMBOL(__iounmap);
|
||||
|
||||
|
|
|
@ -758,10 +758,10 @@ config HOTPLUG_CPU
|
|||
bool "Support for hot-pluggable CPUs (EXPERIMENTAL)"
|
||||
depends on SMP && HOTPLUG && EXPERIMENTAL && !X86_VOYAGER
|
||||
---help---
|
||||
Say Y here to experiment with turning CPUs off and on. CPUs
|
||||
can be controlled through /sys/devices/system/cpu.
|
||||
Say Y here to experiment with turning CPUs off and on, and to
|
||||
enable suspend on SMP systems. CPUs can be controlled through
|
||||
/sys/devices/system/cpu.
|
||||
|
||||
Say N.
|
||||
|
||||
endmenu
|
||||
|
||||
|
|
|
@ -1066,6 +1066,14 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = {
|
|||
DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 360"),
|
||||
},
|
||||
},
|
||||
{
|
||||
.callback = disable_acpi_pci,
|
||||
.ident = "HP xw9300",
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
|
||||
DMI_MATCH(DMI_PRODUCT_NAME, "HP xw9300 Workstation"),
|
||||
},
|
||||
},
|
||||
{}
|
||||
};
|
||||
|
||||
|
|
|
@ -1341,6 +1341,14 @@ int __init APIC_init_uniprocessor (void)
|
|||
|
||||
connect_bsp_APIC();
|
||||
|
||||
/*
|
||||
* Hack: In case of kdump, after a crash, kernel might be booting
|
||||
* on a cpu with non-zero lapic id. But boot_cpu_physical_apicid
|
||||
* might be zero if read from MP tables. Get it from LAPIC.
|
||||
*/
|
||||
#ifdef CONFIG_CRASH_DUMP
|
||||
boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));
|
||||
#endif
|
||||
phys_cpu_present_map = physid_mask_of_physid(boot_cpu_physical_apicid);
|
||||
|
||||
setup_local_APIC();
|
||||
|
|
|
@ -1320,6 +1320,8 @@ legacy_init_iomem_resources(struct resource *code_resource, struct resource *dat
|
|||
probe_roms();
|
||||
for (i = 0; i < e820.nr_map; i++) {
|
||||
struct resource *res;
|
||||
if (e820.map[i].addr + e820.map[i].size > 0x100000000ULL)
|
||||
continue;
|
||||
res = kzalloc(sizeof(struct resource), GFP_ATOMIC);
|
||||
switch (e820.map[i].type) {
|
||||
case E820_RAM: res->name = "System RAM"; break;
|
||||
|
|
|
@ -130,9 +130,8 @@ static inline int print_addr_and_symbol(unsigned long addr, char *log_lvl,
|
|||
print_symbol("%s", addr);
|
||||
|
||||
printed = (printed + 1) % CONFIG_STACK_BACKTRACE_COLS;
|
||||
|
||||
if (printed)
|
||||
printk(" ");
|
||||
printk(" ");
|
||||
else
|
||||
printk("\n");
|
||||
|
||||
|
@ -212,7 +211,6 @@ static void show_stack_log_lvl(struct task_struct *task, unsigned long *esp,
|
|||
}
|
||||
|
||||
stack = esp;
|
||||
printk(log_lvl);
|
||||
for(i = 0; i < kstack_depth_to_print; i++) {
|
||||
if (kstack_end(stack))
|
||||
break;
|
||||
|
|
|
@ -651,7 +651,7 @@ void __init mem_init(void)
|
|||
* Specifically, in the case of x86, we will always add
|
||||
* memory to the highmem for now.
|
||||
*/
|
||||
#ifdef CONFIG_HOTPLUG_MEMORY
|
||||
#ifdef CONFIG_MEMORY_HOTPLUG
|
||||
#ifndef CONFIG_NEED_MULTIPLE_NODES
|
||||
int add_memory(u64 start, u64 size)
|
||||
{
|
||||
|
|
|
@ -332,10 +332,11 @@ static int __init ppro_init(char ** cpu_type)
|
|||
{
|
||||
__u8 cpu_model = boot_cpu_data.x86_model;
|
||||
|
||||
if (cpu_model > 0xd)
|
||||
if (cpu_model == 14)
|
||||
*cpu_type = "i386/core";
|
||||
else if (cpu_model > 0xd)
|
||||
return 0;
|
||||
|
||||
if (cpu_model == 9) {
|
||||
else if (cpu_model == 9) {
|
||||
*cpu_type = "i386/p6_mobile";
|
||||
} else if (cpu_model > 5) {
|
||||
*cpu_type = "i386/piii";
|
||||
|
|
|
@ -92,7 +92,7 @@ void __restore_processor_state(struct saved_context *ctxt)
|
|||
write_cr4(ctxt->cr4);
|
||||
write_cr3(ctxt->cr3);
|
||||
write_cr2(ctxt->cr2);
|
||||
write_cr2(ctxt->cr0);
|
||||
write_cr0(ctxt->cr0);
|
||||
|
||||
/*
|
||||
* now restore the descriptor tables to their proper values
|
||||
|
|
|
@ -134,7 +134,7 @@ CONFIG_ARCH_FLATMEM_ENABLE=y
|
|||
CONFIG_ARCH_SPARSEMEM_ENABLE=y
|
||||
CONFIG_ARCH_DISCONTIGMEM_DEFAULT=y
|
||||
CONFIG_NUMA=y
|
||||
CONFIG_NODES_SHIFT=8
|
||||
CONFIG_NODES_SHIFT=10
|
||||
CONFIG_VIRTUAL_MEM_MAP=y
|
||||
CONFIG_HOLES_IN_ZONE=y
|
||||
CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID=y
|
||||
|
@ -1159,7 +1159,7 @@ CONFIG_DETECT_SOFTLOCKUP=y
|
|||
# CONFIG_SCHEDSTATS is not set
|
||||
# CONFIG_DEBUG_SLAB is not set
|
||||
CONFIG_DEBUG_PREEMPT=y
|
||||
CONFIG_DEBUG_MUTEXES=y
|
||||
# CONFIG_DEBUG_MUTEXES is not set
|
||||
# CONFIG_DEBUG_SPINLOCK is not set
|
||||
# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
|
||||
# CONFIG_DEBUG_KOBJECT is not set
|
||||
|
|
|
@ -416,7 +416,7 @@ iosapic_end_level_irq (unsigned int irq)
|
|||
ia64_vector vec = irq_to_vector(irq);
|
||||
struct iosapic_rte_info *rte;
|
||||
|
||||
move_irq(irq);
|
||||
move_native_irq(irq);
|
||||
list_for_each_entry(rte, &iosapic_intr_info[vec].rtes, rte_list)
|
||||
iosapic_eoi(rte->addr, vec);
|
||||
}
|
||||
|
@ -458,7 +458,7 @@ iosapic_ack_edge_irq (unsigned int irq)
|
|||
{
|
||||
irq_desc_t *idesc = irq_descp(irq);
|
||||
|
||||
move_irq(irq);
|
||||
move_native_irq(irq);
|
||||
/*
|
||||
* Once we have recorded IRQ_PENDING already, we can mask the
|
||||
* interrupt for real. This prevents IRQ storms from unhandled
|
||||
|
|
|
@ -101,7 +101,6 @@ void set_irq_affinity_info (unsigned int irq, int hwid, int redir)
|
|||
|
||||
if (irq < NR_IRQS) {
|
||||
irq_affinity[irq] = mask;
|
||||
set_irq_info(irq, mask);
|
||||
irq_redir[irq] = (char) (redir & 0xff);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1636,7 +1636,7 @@ static int __init prom_find_machine_type(void)
|
|||
compat, sizeof(compat)-1);
|
||||
if (len <= 0)
|
||||
return PLATFORM_GENERIC;
|
||||
if (strncmp(compat, RELOC("chrp"), 4))
|
||||
if (strcmp(compat, RELOC("chrp")))
|
||||
return PLATFORM_GENERIC;
|
||||
|
||||
/* Default to pSeries. We need to know if we are running LPAR */
|
||||
|
|
|
@ -338,6 +338,8 @@ SYSCALL(symlinkat)
|
|||
SYSCALL(readlinkat)
|
||||
SYSCALL(fchmodat)
|
||||
SYSCALL(faccessat)
|
||||
COMPAT_SYS(get_robust_list)
|
||||
COMPAT_SYS(set_robust_list)
|
||||
|
||||
/*
|
||||
* please add new calls to arch/powerpc/platforms/cell/spu_callbacks.c
|
||||
|
|
|
@ -258,6 +258,7 @@ void *spu_syscall_table[] = {
|
|||
[__NR_futex] sys_futex,
|
||||
[__NR_sched_setaffinity] sys_sched_setaffinity,
|
||||
[__NR_sched_getaffinity] sys_sched_getaffinity,
|
||||
[224] sys_ni_syscall,
|
||||
[__NR_tuxcall] sys_ni_syscall,
|
||||
[226] sys_ni_syscall,
|
||||
[__NR_io_setup] sys_io_setup,
|
||||
|
@ -332,19 +333,21 @@ void *spu_syscall_table[] = {
|
|||
[__NR_readlinkat] sys_readlinkat,
|
||||
[__NR_fchmodat] sys_fchmodat,
|
||||
[__NR_faccessat] sys_faccessat,
|
||||
[__NR_get_robust_list] sys_get_robust_list,
|
||||
[__NR_set_robust_list] sys_set_robust_list,
|
||||
};
|
||||
|
||||
long spu_sys_callback(struct spu_syscall_block *s)
|
||||
{
|
||||
long (*syscall)(u64 a1, u64 a2, u64 a3, u64 a4, u64 a5, u64 a6);
|
||||
|
||||
syscall = spu_syscall_table[s->nr_ret];
|
||||
|
||||
if (s->nr_ret >= ARRAY_SIZE(spu_syscall_table)) {
|
||||
pr_debug("%s: invalid syscall #%ld", __FUNCTION__, s->nr_ret);
|
||||
return -ENOSYS;
|
||||
}
|
||||
|
||||
syscall = spu_syscall_table[s->nr_ret];
|
||||
|
||||
#ifdef DEBUG
|
||||
print_symbol(KERN_DEBUG "SPU-syscall %s:", (unsigned long)syscall);
|
||||
printk("syscall%ld(%lx, %lx, %lx, %lx, %lx, %lx)\n",
|
||||
|
|
|
@ -255,7 +255,7 @@ static int __init pSeries_init_panel(void)
|
|||
{
|
||||
/* Manually leave the kernel version on the panel. */
|
||||
ppc_md.progress("Linux ppc64\n", 0);
|
||||
ppc_md.progress(system_utsname.version, 0);
|
||||
ppc_md.progress(system_utsname.release, 0);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -1650,3 +1650,11 @@ sys_tee_wrapper:
|
|||
llgfr %r4,%r4 # size_t
|
||||
llgfr %r5,%r5 # unsigned int
|
||||
jg sys_tee
|
||||
|
||||
.globl compat_sys_vmsplice_wrapper
|
||||
compat_sys_vmsplice_wrapper:
|
||||
lgfr %r2,%r2 # int
|
||||
llgtr %r3,%r3 # compat_iovec *
|
||||
llgfr %r4,%r4 # unsigned int
|
||||
llgfr %r5,%r5 # unsigned int
|
||||
jg compat_sys_vmsplice
|
||||
|
|
|
@ -317,3 +317,4 @@ SYSCALL(sys_get_robust_list,sys_get_robust_list,compat_sys_get_robust_list_wrapp
|
|||
SYSCALL(sys_splice,sys_splice,sys_splice_wrapper)
|
||||
SYSCALL(sys_sync_file_range,sys_sync_file_range,sys_sync_file_range_wrapper)
|
||||
SYSCALL(sys_tee,sys_tee,sys_tee_wrapper)
|
||||
SYSCALL(sys_vmsplice,sys_vmsplice,compat_sys_vmsplice_wrapper)
|
||||
|
|
|
@ -249,18 +249,19 @@ static inline void stop_hz_timer(void)
|
|||
unsigned long flags;
|
||||
unsigned long seq, next;
|
||||
__u64 timer, todval;
|
||||
int cpu = smp_processor_id();
|
||||
|
||||
if (sysctl_hz_timer != 0)
|
||||
return;
|
||||
|
||||
cpu_set(smp_processor_id(), nohz_cpu_mask);
|
||||
cpu_set(cpu, nohz_cpu_mask);
|
||||
|
||||
/*
|
||||
* Leave the clock comparator set up for the next timer
|
||||
* tick if either rcu or a softirq is pending.
|
||||
*/
|
||||
if (rcu_pending(smp_processor_id()) || local_softirq_pending()) {
|
||||
cpu_clear(smp_processor_id(), nohz_cpu_mask);
|
||||
if (rcu_needs_cpu(cpu) || local_softirq_pending()) {
|
||||
cpu_clear(cpu, nohz_cpu_mask);
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -271,7 +272,7 @@ static inline void stop_hz_timer(void)
|
|||
next = next_timer_interrupt();
|
||||
do {
|
||||
seq = read_seqbegin_irqsave(&xtime_lock, flags);
|
||||
timer = (__u64)(next - jiffies) + jiffies_64;
|
||||
timer = (__u64 next) - (__u64 jiffies) + jiffies_64;
|
||||
} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
|
||||
todval = -1ULL;
|
||||
/* Be careful about overflows. */
|
||||
|
|
|
@ -274,6 +274,11 @@ void *sbus_alloc_consistent(struct sbus_dev *sdev, long len, u32 *dma_addrp)
|
|||
if (mmu_map_dma_area(dma_addrp, va, res->start, len_total) != 0)
|
||||
goto err_noiommu;
|
||||
|
||||
/* Set the resource name, if known. */
|
||||
if (sdev) {
|
||||
res->name = sdev->prom_name;
|
||||
}
|
||||
|
||||
return (void *)res->start;
|
||||
|
||||
err_noiommu:
|
||||
|
|
|
@ -113,6 +113,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
|
|||
|
||||
switch (ELF32_R_TYPE(rel[i].r_info)) {
|
||||
case R_SPARC_32:
|
||||
case R_SPARC_UA32:
|
||||
location[0] = v >> 24;
|
||||
location[1] = v >> 16;
|
||||
location[2] = v >> 8;
|
||||
|
|
|
@ -251,19 +251,9 @@ EXPORT_SYMBOL(__prom_getchild);
|
|||
EXPORT_SYMBOL(__prom_getsibling);
|
||||
|
||||
/* sparc library symbols */
|
||||
EXPORT_SYMBOL(memchr);
|
||||
EXPORT_SYMBOL(memscan);
|
||||
EXPORT_SYMBOL(strlen);
|
||||
EXPORT_SYMBOL(strnlen);
|
||||
EXPORT_SYMBOL(strcpy);
|
||||
EXPORT_SYMBOL(strncpy);
|
||||
EXPORT_SYMBOL(strcat);
|
||||
EXPORT_SYMBOL(strncat);
|
||||
EXPORT_SYMBOL(strcmp);
|
||||
EXPORT_SYMBOL(strncmp);
|
||||
EXPORT_SYMBOL(strchr);
|
||||
EXPORT_SYMBOL(strrchr);
|
||||
EXPORT_SYMBOL(strstr);
|
||||
EXPORT_SYMBOL(page_kernel);
|
||||
|
||||
/* Special internal versions of library functions. */
|
||||
|
@ -317,6 +307,3 @@ EXPORT_SYMBOL(do_BUG);
|
|||
|
||||
/* Sun Power Management Idle Handler */
|
||||
EXPORT_SYMBOL(pm_idle);
|
||||
|
||||
/* Binfmt_misc needs this */
|
||||
EXPORT_SYMBOL(sys_close);
|
||||
|
|
|
@ -79,6 +79,7 @@ sys_call_table:
|
|||
/*285*/ .long sys_mkdirat, sys_mknodat, sys_fchownat, sys_futimesat, sys_fstatat64
|
||||
/*290*/ .long sys_unlinkat, sys_renameat, sys_linkat, sys_symlinkat, sys_readlinkat
|
||||
/*295*/ .long sys_fchmodat, sys_faccessat, sys_pselect6, sys_ppoll, sys_unshare
|
||||
/*300*/ .long sys_set_robust_list, sys_get_robust_list
|
||||
|
||||
#ifdef CONFIG_SUNOS_EMUL
|
||||
/* Now the SunOS syscall table. */
|
||||
|
@ -190,6 +191,6 @@ sunos_sys_table:
|
|||
/*290*/ .long sunos_nosys, sunos_nosys, sunos_nosys
|
||||
.long sunos_nosys, sunos_nosys, sunos_nosys
|
||||
.long sunos_nosys, sunos_nosys, sunos_nosys
|
||||
.long sunos_nosys
|
||||
.long sunos_nosys, sunos_nosys, sunos_nosys
|
||||
|
||||
#endif
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
#
|
||||
# Automatically generated make config: don't edit
|
||||
# Linux kernel version: 2.6.16
|
||||
# Sun Apr 2 19:31:04 2006
|
||||
# Linux kernel version: 2.6.17-rc3
|
||||
# Fri May 12 12:43:49 2006
|
||||
#
|
||||
CONFIG_SPARC=y
|
||||
CONFIG_SPARC64=y
|
||||
|
@ -114,6 +114,7 @@ CONFIG_GENERIC_CALIBRATE_DELAY=y
|
|||
CONFIG_HUGETLB_PAGE_SIZE_4MB=y
|
||||
# CONFIG_HUGETLB_PAGE_SIZE_512K is not set
|
||||
# CONFIG_HUGETLB_PAGE_SIZE_64K is not set
|
||||
CONFIG_ARCH_SELECT_MEMORY_MODEL=y
|
||||
CONFIG_ARCH_SPARSEMEM_ENABLE=y
|
||||
CONFIG_ARCH_SPARSEMEM_DEFAULT=y
|
||||
CONFIG_LARGE_ALLOCS=y
|
||||
|
@ -430,7 +431,6 @@ CONFIG_ISCSI_TCP=m
|
|||
# CONFIG_SCSI_INIA100 is not set
|
||||
# CONFIG_SCSI_SYM53C8XX_2 is not set
|
||||
# CONFIG_SCSI_IPR is not set
|
||||
# CONFIG_SCSI_QLOGIC_FC is not set
|
||||
# CONFIG_SCSI_QLOGIC_1280 is not set
|
||||
# CONFIG_SCSI_QLOGICPTI is not set
|
||||
# CONFIG_SCSI_QLA_FC is not set
|
||||
|
@ -1042,9 +1042,7 @@ CONFIG_USB_HIDDEV=y
|
|||
# CONFIG_USB_ACECAD is not set
|
||||
# CONFIG_USB_KBTAB is not set
|
||||
# CONFIG_USB_POWERMATE is not set
|
||||
# CONFIG_USB_MTOUCH is not set
|
||||
# CONFIG_USB_ITMTOUCH is not set
|
||||
# CONFIG_USB_EGALAX is not set
|
||||
# CONFIG_USB_TOUCHSCREEN is not set
|
||||
# CONFIG_USB_YEALINK is not set
|
||||
# CONFIG_USB_XPAD is not set
|
||||
# CONFIG_USB_ATI_REMOTE is not set
|
||||
|
@ -1114,6 +1112,14 @@ CONFIG_USB_HIDDEV=y
|
|||
#
|
||||
# CONFIG_NEW_LEDS is not set
|
||||
|
||||
#
|
||||
# LED drivers
|
||||
#
|
||||
|
||||
#
|
||||
# LED Triggers
|
||||
#
|
||||
|
||||
#
|
||||
# InfiniBand support
|
||||
#
|
||||
|
@ -1303,6 +1309,7 @@ CONFIG_DEBUG_BUGVERBOSE=y
|
|||
# CONFIG_DEBUG_INFO is not set
|
||||
CONFIG_DEBUG_FS=y
|
||||
# CONFIG_DEBUG_VM is not set
|
||||
# CONFIG_UNWIND_INFO is not set
|
||||
CONFIG_FORCED_INLINING=y
|
||||
# CONFIG_RCU_TORTURE_TEST is not set
|
||||
# CONFIG_DEBUG_STACK_USAGE is not set
|
||||
|
|
|
@ -143,6 +143,11 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
|
|||
location[3] = v >> 0;
|
||||
break;
|
||||
|
||||
case R_SPARC_DISP32:
|
||||
v -= (Elf64_Addr) location;
|
||||
*loc32 = v;
|
||||
break;
|
||||
|
||||
case R_SPARC_WDISP30:
|
||||
v -= (Elf64_Addr) location;
|
||||
*loc32 = (*loc32 & ~0x3fffffff) |
|
||||
|
|
|
@ -218,7 +218,7 @@ static inline void iommu_free_ctx(struct pci_iommu *iommu, int ctx)
|
|||
* DMA for PCI device PDEV. Return non-NULL cpu-side address if
|
||||
* successful and set *DMA_ADDRP to the PCI side dma address.
|
||||
*/
|
||||
static void *pci_4u_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp)
|
||||
static void *pci_4u_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp, gfp_t gfp)
|
||||
{
|
||||
struct pcidev_cookie *pcp;
|
||||
struct pci_iommu *iommu;
|
||||
|
@ -232,7 +232,7 @@ static void *pci_4u_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr
|
|||
if (order >= 10)
|
||||
return NULL;
|
||||
|
||||
first_page = __get_free_pages(GFP_ATOMIC, order);
|
||||
first_page = __get_free_pages(gfp, order);
|
||||
if (first_page == 0UL)
|
||||
return NULL;
|
||||
memset((char *)first_page, 0, PAGE_SIZE << order);
|
||||
|
|
|
@ -154,7 +154,7 @@ static void pci_arena_free(struct pci_iommu_arena *arena, unsigned long base, un
|
|||
__clear_bit(i, arena->map);
|
||||
}
|
||||
|
||||
static void *pci_4v_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp)
|
||||
static void *pci_4v_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp, gfp_t gfp)
|
||||
{
|
||||
struct pcidev_cookie *pcp;
|
||||
struct pci_iommu *iommu;
|
||||
|
@ -169,7 +169,7 @@ static void *pci_4v_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr
|
|||
|
||||
npages = size >> IO_PAGE_SHIFT;
|
||||
|
||||
first_page = __get_free_pages(GFP_ATOMIC, order);
|
||||
first_page = __get_free_pages(gfp, order);
|
||||
if (unlikely(first_page == 0UL))
|
||||
return NULL;
|
||||
|
||||
|
|
|
@ -78,8 +78,9 @@ sys_call_table32:
|
|||
.word compat_sys_mq_timedsend, compat_sys_mq_timedreceive, compat_sys_mq_notify, compat_sys_mq_getsetattr, compat_sys_waitid
|
||||
/*280*/ .word sys32_tee, sys_add_key, sys_request_key, sys_keyctl, compat_sys_openat
|
||||
.word sys_mkdirat, sys_mknodat, sys_fchownat, compat_sys_futimesat, compat_sys_fstatat64
|
||||
/*285*/ .word sys_unlinkat, sys_renameat, sys_linkat, sys_symlinkat, sys_readlinkat
|
||||
/*290*/ .word sys_unlinkat, sys_renameat, sys_linkat, sys_symlinkat, sys_readlinkat
|
||||
.word sys_fchmodat, sys_faccessat, compat_sys_pselect6, compat_sys_ppoll, sys_unshare
|
||||
/*300*/ .word compat_sys_set_robust_list, compat_sys_get_robust_list
|
||||
|
||||
#endif /* CONFIG_COMPAT */
|
||||
|
||||
|
@ -147,8 +148,9 @@ sys_call_table:
|
|||
.word sys_mq_timedsend, sys_mq_timedreceive, sys_mq_notify, sys_mq_getsetattr, sys_waitid
|
||||
/*280*/ .word sys_tee, sys_add_key, sys_request_key, sys_keyctl, sys_openat
|
||||
.word sys_mkdirat, sys_mknodat, sys_fchownat, sys_futimesat, sys_fstatat64
|
||||
/*285*/ .word sys_unlinkat, sys_renameat, sys_linkat, sys_symlinkat, sys_readlinkat
|
||||
/*290*/ .word sys_unlinkat, sys_renameat, sys_linkat, sys_symlinkat, sys_readlinkat
|
||||
.word sys_fchmodat, sys_faccessat, sys_pselect6, sys_ppoll, sys_unshare
|
||||
/*300*/ .word sys_set_robust_list, sys_get_robust_list
|
||||
|
||||
#if defined(CONFIG_SUNOS_EMUL) || defined(CONFIG_SOLARIS_EMUL) || \
|
||||
defined(CONFIG_SOLARIS_EMUL_MODULE)
|
||||
|
@ -261,5 +263,5 @@ sunos_sys_table:
|
|||
/*290*/ .word sunos_nosys, sunos_nosys, sunos_nosys
|
||||
.word sunos_nosys, sunos_nosys, sunos_nosys
|
||||
.word sunos_nosys, sunos_nosys, sunos_nosys
|
||||
.word sunos_nosys
|
||||
.word sunos_nosys, sunos_nosys, sunos_nosys
|
||||
#endif
|
||||
|
|
|
@ -514,13 +514,13 @@ static void __kprobes resume_execution(struct kprobe *p,
|
|||
*tos = orig_rip + (*tos - copy_rip);
|
||||
break;
|
||||
case 0xff:
|
||||
if ((*insn & 0x30) == 0x10) {
|
||||
if ((insn[1] & 0x30) == 0x10) {
|
||||
/* call absolute, indirect */
|
||||
/* Fix return addr; rip is correct. */
|
||||
next_rip = regs->rip;
|
||||
*tos = orig_rip + (*tos - copy_rip);
|
||||
} else if (((*insn & 0x31) == 0x20) || /* jmp near, absolute indirect */
|
||||
((*insn & 0x31) == 0x21)) { /* jmp far, absolute indirect */
|
||||
} else if (((insn[1] & 0x31) == 0x20) || /* jmp near, absolute indirect */
|
||||
((insn[1] & 0x31) == 0x21)) { /* jmp far, absolute indirect */
|
||||
/* rip is correct. */
|
||||
next_rip = regs->rip;
|
||||
}
|
||||
|
|
|
@ -12,9 +12,10 @@ static int
|
|||
check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size)
|
||||
{
|
||||
if (hwdev && bus + size > *hwdev->dma_mask) {
|
||||
printk(KERN_ERR
|
||||
"nommu_%s: overflow %Lx+%lu of device mask %Lx\n",
|
||||
name, (long long)bus, size, (long long)*hwdev->dma_mask);
|
||||
if (*hwdev->dma_mask >= 0xffffffffULL)
|
||||
printk(KERN_ERR
|
||||
"nommu_%s: overflow %Lx+%lu of device mask %Lx\n",
|
||||
name, (long long)bus, size, (long long)*hwdev->dma_mask);
|
||||
return 0;
|
||||
}
|
||||
return 1;
|
||||
|
|
|
@ -102,6 +102,8 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
|
|||
{
|
||||
if (regs->eflags & X86_EFLAGS_IF)
|
||||
local_irq_disable();
|
||||
/* Make sure to not schedule here because we could be running
|
||||
on an exception stack. */
|
||||
preempt_enable_no_resched();
|
||||
}
|
||||
|
||||
|
@ -483,8 +485,6 @@ static void __kprobes do_trap(int trapnr, int signr, char *str,
|
|||
{
|
||||
struct task_struct *tsk = current;
|
||||
|
||||
conditional_sti(regs);
|
||||
|
||||
tsk->thread.error_code = error_code;
|
||||
tsk->thread.trap_no = trapnr;
|
||||
|
||||
|
@ -521,6 +521,7 @@ asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
|
|||
if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
|
||||
== NOTIFY_STOP) \
|
||||
return; \
|
||||
conditional_sti(regs); \
|
||||
do_trap(trapnr, signr, str, regs, error_code, NULL); \
|
||||
}
|
||||
|
||||
|
@ -535,6 +536,7 @@ asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
|
|||
if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
|
||||
== NOTIFY_STOP) \
|
||||
return; \
|
||||
conditional_sti(regs); \
|
||||
do_trap(trapnr, signr, str, regs, error_code, &info); \
|
||||
}
|
||||
|
||||
|
@ -548,7 +550,17 @@ DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
|
|||
DO_ERROR(11, SIGBUS, "segment not present", segment_not_present)
|
||||
DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
|
||||
DO_ERROR(18, SIGSEGV, "reserved", reserved)
|
||||
DO_ERROR(12, SIGBUS, "stack segment", stack_segment)
|
||||
|
||||
/* Runs on IST stack */
|
||||
asmlinkage void do_stack_segment(struct pt_regs *regs, long error_code)
|
||||
{
|
||||
if (notify_die(DIE_TRAP, "stack segment", regs, error_code,
|
||||
12, SIGBUS) == NOTIFY_STOP)
|
||||
return;
|
||||
preempt_conditional_sti(regs);
|
||||
do_trap(12, SIGBUS, "stack segment", regs, error_code, NULL);
|
||||
preempt_conditional_cli(regs);
|
||||
}
|
||||
|
||||
asmlinkage void do_double_fault(struct pt_regs * regs, long error_code)
|
||||
{
|
||||
|
@ -682,8 +694,9 @@ asmlinkage void __kprobes do_int3(struct pt_regs * regs, long error_code)
|
|||
if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP) == NOTIFY_STOP) {
|
||||
return;
|
||||
}
|
||||
preempt_conditional_sti(regs);
|
||||
do_trap(3, SIGTRAP, "int3", regs, error_code, NULL);
|
||||
return;
|
||||
preempt_conditional_cli(regs);
|
||||
}
|
||||
|
||||
/* Help handler running on IST stack to switch back to user stack
|
||||
|
|
|
@ -34,7 +34,10 @@ static nodemask_t nodes_found __initdata;
|
|||
static struct bootnode nodes[MAX_NUMNODES] __initdata;
|
||||
static struct bootnode nodes_add[MAX_NUMNODES] __initdata;
|
||||
static int found_add_area __initdata;
|
||||
int hotadd_percent __initdata = 10;
|
||||
int hotadd_percent __initdata = 0;
|
||||
#ifndef RESERVE_HOTADD
|
||||
#define hotadd_percent 0 /* Ignore all settings */
|
||||
#endif
|
||||
static u8 pxm2node[256] = { [0 ... 255] = 0xff };
|
||||
|
||||
/* Too small nodes confuse the VM badly. Usually they result
|
||||
|
@ -103,6 +106,7 @@ static __init void bad_srat(void)
|
|||
int i;
|
||||
printk(KERN_ERR "SRAT: SRAT not used.\n");
|
||||
acpi_numa = -1;
|
||||
found_add_area = 0;
|
||||
for (i = 0; i < MAX_LOCAL_APIC; i++)
|
||||
apicid_to_node[i] = NUMA_NO_NODE;
|
||||
for (i = 0; i < MAX_NUMNODES; i++)
|
||||
|
@ -154,7 +158,8 @@ acpi_numa_processor_affinity_init(struct acpi_table_processor_affinity *pa)
|
|||
int pxm, node;
|
||||
if (srat_disabled())
|
||||
return;
|
||||
if (pa->header.length != sizeof(struct acpi_table_processor_affinity)) { bad_srat();
|
||||
if (pa->header.length != sizeof(struct acpi_table_processor_affinity)) {
|
||||
bad_srat();
|
||||
return;
|
||||
}
|
||||
if (pa->flags.enabled == 0)
|
||||
|
@ -191,15 +196,17 @@ static int hotadd_enough_memory(struct bootnode *nd)
|
|||
allowed = (end_pfn - e820_hole_size(0, end_pfn)) * PAGE_SIZE;
|
||||
allowed = (allowed / 100) * hotadd_percent;
|
||||
if (allocated + mem > allowed) {
|
||||
unsigned long range;
|
||||
/* Give them at least part of their hotadd memory upto hotadd_percent
|
||||
It would be better to spread the limit out
|
||||
over multiple hotplug areas, but that is too complicated
|
||||
right now */
|
||||
if (allocated >= allowed)
|
||||
return 0;
|
||||
pages = (allowed - allocated + mem) / sizeof(struct page);
|
||||
range = allowed - allocated;
|
||||
pages = (range / PAGE_SIZE);
|
||||
mem = pages * sizeof(struct page);
|
||||
nd->end = nd->start + pages*PAGE_SIZE;
|
||||
nd->end = nd->start + range;
|
||||
}
|
||||
/* Not completely fool proof, but a good sanity check */
|
||||
addr = find_e820_area(last_area_end, end_pfn<<PAGE_SHIFT, mem);
|
||||
|
|
|
@ -182,7 +182,6 @@ static int exact_lock(dev_t dev, void *data)
|
|||
*/
|
||||
void add_disk(struct gendisk *disk)
|
||||
{
|
||||
get_device(disk->driverfs_dev);
|
||||
disk->flags |= GENHD_FL_UP;
|
||||
blk_register_region(MKDEV(disk->major, disk->first_minor),
|
||||
disk->minors, NULL, exact_match, exact_lock, disk);
|
||||
|
@ -428,7 +427,6 @@ static struct attribute * default_attrs[] = {
|
|||
static void disk_release(struct kobject * kobj)
|
||||
{
|
||||
struct gendisk *disk = to_disk(kobj);
|
||||
put_device(disk->driverfs_dev);
|
||||
kfree(disk->random);
|
||||
kfree(disk->part);
|
||||
free_disk_stats(disk);
|
||||
|
|
|
@ -3452,7 +3452,12 @@ void end_that_request_last(struct request *req, int uptodate)
|
|||
if (unlikely(laptop_mode) && blk_fs_request(req))
|
||||
laptop_io_completion();
|
||||
|
||||
if (disk && blk_fs_request(req)) {
|
||||
/*
|
||||
* Account IO completion. bar_rq isn't accounted as a normal
|
||||
* IO on queueing nor completion. Accounting the containing
|
||||
* request is enough.
|
||||
*/
|
||||
if (disk && blk_fs_request(req) && req != &req->q->bar_rq) {
|
||||
unsigned long duration = jiffies - req->start_time;
|
||||
const int rw = rq_data_dir(req);
|
||||
|
||||
|
|
|
@ -86,18 +86,9 @@ firmware_timeout_store(struct class *class, const char *buf, size_t count)
|
|||
static CLASS_ATTR(timeout, 0644, firmware_timeout_show, firmware_timeout_store);
|
||||
|
||||
static void fw_class_dev_release(struct class_device *class_dev);
|
||||
int firmware_class_uevent(struct class_device *dev, char **envp,
|
||||
int num_envp, char *buffer, int buffer_size);
|
||||
|
||||
static struct class firmware_class = {
|
||||
.name = "firmware",
|
||||
.uevent = firmware_class_uevent,
|
||||
.release = fw_class_dev_release,
|
||||
};
|
||||
|
||||
int
|
||||
firmware_class_uevent(struct class_device *class_dev, char **envp,
|
||||
int num_envp, char *buffer, int buffer_size)
|
||||
static int firmware_class_uevent(struct class_device *class_dev, char **envp,
|
||||
int num_envp, char *buffer, int buffer_size)
|
||||
{
|
||||
struct firmware_priv *fw_priv = class_get_devdata(class_dev);
|
||||
int i = 0, len = 0;
|
||||
|
@ -116,6 +107,12 @@ firmware_class_uevent(struct class_device *class_dev, char **envp,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static struct class firmware_class = {
|
||||
.name = "firmware",
|
||||
.uevent = firmware_class_uevent,
|
||||
.release = fw_class_dev_release,
|
||||
};
|
||||
|
||||
static ssize_t
|
||||
firmware_loading_show(struct class_device *class_dev, char *buf)
|
||||
{
|
||||
|
@ -493,25 +490,6 @@ release_firmware(const struct firmware *fw)
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* register_firmware: - provide a firmware image for later usage
|
||||
* @name: name of firmware image file
|
||||
* @data: buffer pointer for the firmware image
|
||||
* @size: size of the data buffer area
|
||||
*
|
||||
* Make sure that @data will be available by requesting firmware @name.
|
||||
*
|
||||
* Note: This will not be possible until some kind of persistence
|
||||
* is available.
|
||||
**/
|
||||
void
|
||||
register_firmware(const char *name, const u8 *data, size_t size)
|
||||
{
|
||||
/* This is meaningless without firmware caching, so until we
|
||||
* decide if firmware caching is reasonable just leave it as a
|
||||
* noop */
|
||||
}
|
||||
|
||||
/* Async support */
|
||||
struct firmware_work {
|
||||
struct work_struct work;
|
||||
|
@ -630,4 +608,3 @@ module_exit(firmware_class_exit);
|
|||
EXPORT_SYMBOL(release_firmware);
|
||||
EXPORT_SYMBOL(request_firmware);
|
||||
EXPORT_SYMBOL(request_firmware_nowait);
|
||||
EXPORT_SYMBOL(register_firmware);
|
||||
|
|
|
@ -536,6 +536,9 @@ static void ub_cleanup(struct ub_dev *sc)
|
|||
kfree(lun);
|
||||
}
|
||||
|
||||
usb_set_intfdata(sc->intf, NULL);
|
||||
usb_put_intf(sc->intf);
|
||||
usb_put_dev(sc->dev);
|
||||
kfree(sc);
|
||||
}
|
||||
|
||||
|
@ -2221,7 +2224,12 @@ static int ub_probe(struct usb_interface *intf,
|
|||
// sc->ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
|
||||
usb_set_intfdata(intf, sc);
|
||||
usb_get_dev(sc->dev);
|
||||
// usb_get_intf(sc->intf); /* Do we need this? */
|
||||
/*
|
||||
* Since we give the interface struct to the block level through
|
||||
* disk->driverfs_dev, we have to pin it. Otherwise, block_uevent
|
||||
* oopses on close after a disconnect (kernels 2.6.16 and up).
|
||||
*/
|
||||
usb_get_intf(sc->intf);
|
||||
|
||||
snprintf(sc->name, 12, DRV_NAME "(%d.%d)",
|
||||
sc->dev->bus->busnum, sc->dev->devnum);
|
||||
|
@ -2286,7 +2294,7 @@ static int ub_probe(struct usb_interface *intf,
|
|||
|
||||
err_dev_desc:
|
||||
usb_set_intfdata(intf, NULL);
|
||||
// usb_put_intf(sc->intf);
|
||||
usb_put_intf(sc->intf);
|
||||
usb_put_dev(sc->dev);
|
||||
kfree(sc);
|
||||
err_core:
|
||||
|
@ -2461,12 +2469,6 @@ static void ub_disconnect(struct usb_interface *intf)
|
|||
* and no URBs left in transit.
|
||||
*/
|
||||
|
||||
usb_set_intfdata(intf, NULL);
|
||||
// usb_put_intf(sc->intf);
|
||||
sc->intf = NULL;
|
||||
usb_put_dev(sc->dev);
|
||||
sc->dev = NULL;
|
||||
|
||||
ub_put(sc);
|
||||
}
|
||||
|
||||
|
|
|
@ -291,7 +291,7 @@ config SX
|
|||
|
||||
config RIO
|
||||
tristate "Specialix RIO system support"
|
||||
depends on SERIAL_NONSTANDARD && !64BIT
|
||||
depends on SERIAL_NONSTANDARD
|
||||
help
|
||||
This is a driver for the Specialix RIO, a smart serial card which
|
||||
drives an outboard box that can support up to 128 ports. Product
|
||||
|
|
|
@ -33,12 +33,6 @@
|
|||
#ifndef __rio_host_h__
|
||||
#define __rio_host_h__
|
||||
|
||||
#ifdef SCCS_LABELS
|
||||
#ifndef lint
|
||||
static char *_host_h_sccs_ = "@(#)host.h 1.2";
|
||||
#endif
|
||||
#endif
|
||||
|
||||
/*
|
||||
** the host structure - one per host card in the system.
|
||||
*/
|
||||
|
@ -77,9 +71,6 @@ struct Host {
|
|||
#define RC_STARTUP 1
|
||||
#define RC_RUNNING 2
|
||||
#define RC_STUFFED 3
|
||||
#define RC_SOMETHING 4
|
||||
#define RC_SOMETHING_NEW 5
|
||||
#define RC_SOMETHING_ELSE 6
|
||||
#define RC_READY 7
|
||||
#define RUN_STATE 7
|
||||
/*
|
||||
|
|
|
@ -34,6 +34,7 @@
|
|||
#include <linux/slab.h>
|
||||
#include <linux/termios.h>
|
||||
#include <linux/serial.h>
|
||||
#include <linux/vmalloc.h>
|
||||
#include <asm/semaphore.h>
|
||||
#include <linux/generic_serial.h>
|
||||
#include <linux/errno.h>
|
||||
|
|
|
@ -1394,14 +1394,17 @@ int RIOPreemptiveCmd(struct rio_info *p, struct Port *PortP, u8 Cmd)
|
|||
return RIO_FAIL;
|
||||
}
|
||||
|
||||
if (((int) ((char) PortP->InUse) == -1) || !(CmdBlkP = RIOGetCmdBlk())) {
|
||||
rio_dprintk(RIO_DEBUG_CTRL, "Cannot allocate command block for command %d on port %d\n", Cmd, PortP->PortNum);
|
||||
if ((PortP->InUse == (typeof(PortP->InUse))-1) ||
|
||||
!(CmdBlkP = RIOGetCmdBlk())) {
|
||||
rio_dprintk(RIO_DEBUG_CTRL, "Cannot allocate command block "
|
||||
"for command %d on port %d\n", Cmd, PortP->PortNum);
|
||||
return RIO_FAIL;
|
||||
}
|
||||
|
||||
rio_dprintk(RIO_DEBUG_CTRL, "Command blk %p - InUse now %d\n", CmdBlkP, PortP->InUse);
|
||||
rio_dprintk(RIO_DEBUG_CTRL, "Command blk %p - InUse now %d\n",
|
||||
CmdBlkP, PortP->InUse);
|
||||
|
||||
PktCmdP = (struct PktCmd_M *) &CmdBlkP->Packet.data[0];
|
||||
PktCmdP = (struct PktCmd_M *)&CmdBlkP->Packet.data[0];
|
||||
|
||||
CmdBlkP->Packet.src_unit = 0;
|
||||
if (PortP->SecondBlock)
|
||||
|
@ -1425,38 +1428,46 @@ int RIOPreemptiveCmd(struct rio_info *p, struct Port *PortP, u8 Cmd)
|
|||
|
||||
switch (Cmd) {
|
||||
case MEMDUMP:
|
||||
rio_dprintk(RIO_DEBUG_CTRL, "Queue MEMDUMP command blk %p (addr 0x%x)\n", CmdBlkP, (int) SubCmd.Addr);
|
||||
rio_dprintk(RIO_DEBUG_CTRL, "Queue MEMDUMP command blk %p "
|
||||
"(addr 0x%x)\n", CmdBlkP, (int) SubCmd.Addr);
|
||||
PktCmdP->SubCommand = MEMDUMP;
|
||||
PktCmdP->SubAddr = SubCmd.Addr;
|
||||
break;
|
||||
case FCLOSE:
|
||||
rio_dprintk(RIO_DEBUG_CTRL, "Queue FCLOSE command blk %p\n", CmdBlkP);
|
||||
rio_dprintk(RIO_DEBUG_CTRL, "Queue FCLOSE command blk %p\n",
|
||||
CmdBlkP);
|
||||
break;
|
||||
case READ_REGISTER:
|
||||
rio_dprintk(RIO_DEBUG_CTRL, "Queue READ_REGISTER (0x%x) command blk %p\n", (int) SubCmd.Addr, CmdBlkP);
|
||||
rio_dprintk(RIO_DEBUG_CTRL, "Queue READ_REGISTER (0x%x) "
|
||||
"command blk %p\n", (int) SubCmd.Addr, CmdBlkP);
|
||||
PktCmdP->SubCommand = READ_REGISTER;
|
||||
PktCmdP->SubAddr = SubCmd.Addr;
|
||||
break;
|
||||
case RESUME:
|
||||
rio_dprintk(RIO_DEBUG_CTRL, "Queue RESUME command blk %p\n", CmdBlkP);
|
||||
rio_dprintk(RIO_DEBUG_CTRL, "Queue RESUME command blk %p\n",
|
||||
CmdBlkP);
|
||||
break;
|
||||
case RFLUSH:
|
||||
rio_dprintk(RIO_DEBUG_CTRL, "Queue RFLUSH command blk %p\n", CmdBlkP);
|
||||
rio_dprintk(RIO_DEBUG_CTRL, "Queue RFLUSH command blk %p\n",
|
||||
CmdBlkP);
|
||||
CmdBlkP->PostFuncP = RIORFlushEnable;
|
||||
break;
|
||||
case SUSPEND:
|
||||
rio_dprintk(RIO_DEBUG_CTRL, "Queue SUSPEND command blk %p\n", CmdBlkP);
|
||||
rio_dprintk(RIO_DEBUG_CTRL, "Queue SUSPEND command blk %p\n",
|
||||
CmdBlkP);
|
||||
break;
|
||||
|
||||
case MGET:
|
||||
rio_dprintk(RIO_DEBUG_CTRL, "Queue MGET command blk %p\n", CmdBlkP);
|
||||
rio_dprintk(RIO_DEBUG_CTRL, "Queue MGET command blk %p\n",
|
||||
CmdBlkP);
|
||||
break;
|
||||
|
||||
case MSET:
|
||||
case MBIC:
|
||||
case MBIS:
|
||||
CmdBlkP->Packet.data[4] = (char) PortP->ModemLines;
|
||||
rio_dprintk(RIO_DEBUG_CTRL, "Queue MSET/MBIC/MBIS command blk %p\n", CmdBlkP);
|
||||
rio_dprintk(RIO_DEBUG_CTRL, "Queue MSET/MBIC/MBIS command "
|
||||
"blk %p\n", CmdBlkP);
|
||||
break;
|
||||
|
||||
case WFLUSH:
|
||||
|
@ -1465,12 +1476,14 @@ int RIOPreemptiveCmd(struct rio_info *p, struct Port *PortP, u8 Cmd)
|
|||
** allowed then we should not bother sending any more to the
|
||||
** RTA.
|
||||
*/
|
||||
if ((int) ((char) PortP->WflushFlag) == (int) -1) {
|
||||
rio_dprintk(RIO_DEBUG_CTRL, "Trashed WFLUSH, WflushFlag about to wrap!");
|
||||
if (PortP->WflushFlag == (typeof(PortP->WflushFlag))-1) {
|
||||
rio_dprintk(RIO_DEBUG_CTRL, "Trashed WFLUSH, "
|
||||
"WflushFlag about to wrap!");
|
||||
RIOFreeCmdBlk(CmdBlkP);
|
||||
return (RIO_FAIL);
|
||||
} else {
|
||||
rio_dprintk(RIO_DEBUG_CTRL, "Queue WFLUSH command blk %p\n", CmdBlkP);
|
||||
rio_dprintk(RIO_DEBUG_CTRL, "Queue WFLUSH command "
|
||||
"blk %p\n", CmdBlkP);
|
||||
CmdBlkP->PostFuncP = RIOWFlushMark;
|
||||
}
|
||||
break;
|
||||
|
|
|
@ -33,10 +33,6 @@
|
|||
#ifndef __rioioctl_h__
|
||||
#define __rioioctl_h__
|
||||
|
||||
#ifdef SCCS_LABELS
|
||||
static char *_rioioctl_h_sccs_ = "@(#)rioioctl.h 1.2";
|
||||
#endif
|
||||
|
||||
/*
|
||||
** RIO device driver - user ioctls and associated structures.
|
||||
*/
|
||||
|
@ -44,55 +40,13 @@ static char *_rioioctl_h_sccs_ = "@(#)rioioctl.h 1.2";
|
|||
struct portStats {
|
||||
int port;
|
||||
int gather;
|
||||
ulong txchars;
|
||||
ulong rxchars;
|
||||
ulong opens;
|
||||
ulong closes;
|
||||
ulong ioctls;
|
||||
unsigned long txchars;
|
||||
unsigned long rxchars;
|
||||
unsigned long opens;
|
||||
unsigned long closes;
|
||||
unsigned long ioctls;
|
||||
};
|
||||
|
||||
|
||||
#define rIOC ('r'<<8)
|
||||
#define TCRIOSTATE (rIOC | 1)
|
||||
#define TCRIOXPON (rIOC | 2)
|
||||
#define TCRIOXPOFF (rIOC | 3)
|
||||
#define TCRIOXPCPS (rIOC | 4)
|
||||
#define TCRIOXPRINT (rIOC | 5)
|
||||
#define TCRIOIXANYON (rIOC | 6)
|
||||
#define TCRIOIXANYOFF (rIOC | 7)
|
||||
#define TCRIOIXONON (rIOC | 8)
|
||||
#define TCRIOIXONOFF (rIOC | 9)
|
||||
#define TCRIOMBIS (rIOC | 10)
|
||||
#define TCRIOMBIC (rIOC | 11)
|
||||
#define TCRIOTRIAD (rIOC | 12)
|
||||
#define TCRIOTSTATE (rIOC | 13)
|
||||
|
||||
/*
|
||||
** 15.10.1998 ARG - ESIL 0761 part fix
|
||||
** Add RIO ioctls for manipulating RTS and CTS flow control, (as LynxOS
|
||||
** appears to not support hardware flow control).
|
||||
*/
|
||||
#define TCRIOCTSFLOWEN (rIOC | 14) /* enable CTS flow control */
|
||||
#define TCRIOCTSFLOWDIS (rIOC | 15) /* disable CTS flow control */
|
||||
#define TCRIORTSFLOWEN (rIOC | 16) /* enable RTS flow control */
|
||||
#define TCRIORTSFLOWDIS (rIOC | 17) /* disable RTS flow control */
|
||||
|
||||
/*
|
||||
** 09.12.1998 ARG - ESIL 0776 part fix
|
||||
** Definition for 'RIOC' also appears in daemon.h, so we'd better do a
|
||||
** #ifndef here first.
|
||||
** 'RIO_QUICK_CHECK' also #define'd here as this ioctl is now
|
||||
** allowed to be used by customers.
|
||||
**
|
||||
** 05.02.1999 ARG -
|
||||
** This is what I've decied to do with ioctls etc., which are intended to be
|
||||
** invoked from users applications :
|
||||
** Anything that needs to be defined here will be removed from daemon.h, that
|
||||
** way it won't end up having to be defined/maintained in two places. The only
|
||||
** consequence of this is that this file should now be #include'd by daemon.h
|
||||
**
|
||||
** 'stats' ioctls now #define'd here as they are to be used by customers.
|
||||
*/
|
||||
#define RIOC ('R'<<8)|('i'<<16)|('o'<<24)
|
||||
|
||||
#define RIO_QUICK_CHECK (RIOC | 105)
|

@ -22,7 +22,7 @@ config TCG_TPM

config TCG_TIS
	tristate "TPM Interface Specification 1.2 Interface"
	depends on TCG_TPM
	depends on TCG_TPM && PNPACPI
	---help---
	  If you have a TPM security chip that is compliant with the
	  TCG TIS 1.2 TPM specification say Yes and it will be accessible

@ -140,7 +140,7 @@ extern int tpm_pm_resume(struct device *);
extern struct dentry ** tpm_bios_log_setup(char *);
extern void tpm_bios_log_teardown(struct dentry **);
#else
static inline struct dentry* tpm_bios_log_setup(char *name)
static inline struct dentry ** tpm_bios_log_setup(char *name)
{
	return NULL;
}

@ -55,7 +55,7 @@ enum tis_int_flags {
};

enum tis_defaults {
	TIS_MEM_BASE = 0xFED4000,
	TIS_MEM_BASE = 0xFED40000,
	TIS_MEM_LEN = 0x5000,
	TIS_SHORT_TIMEOUT = 750,	/* ms */
	TIS_LONG_TIMEOUT = 2000,	/* 2 sec */

@ -398,7 +398,7 @@ int tty_insert_flip_string_flags(struct tty_struct *tty,
	while (unlikely(size > copied));
	return copied;
}
EXPORT_SYMBOL_GPL(tty_insert_flip_string_flags);
EXPORT_SYMBOL(tty_insert_flip_string_flags);

void tty_schedule_flip(struct tty_struct *tty)
{
@ -33,11 +33,6 @@
|
|||
* 82801E (C-ICH) : document number 273599-001, 273645-002,
|
||||
* 82801EB (ICH5) : document number 252516-001, 252517-003,
|
||||
* 82801ER (ICH5R) : document number 252516-001, 252517-003,
|
||||
* 82801FB (ICH6) : document number 301473-002, 301474-007,
|
||||
* 82801FR (ICH6R) : document number 301473-002, 301474-007,
|
||||
* 82801FBM (ICH6-M) : document number 301473-002, 301474-007,
|
||||
* 82801FW (ICH6W) : document number 301473-001, 301474-007,
|
||||
* 82801FRW (ICH6RW) : document number 301473-001, 301474-007
|
||||
*
|
||||
* 20000710 Nils Faerber
|
||||
* Initial Version 0.01
|
||||
|
@ -66,6 +61,10 @@
|
|||
* 20050807 Wim Van Sebroeck <wim@iguana.be>
|
||||
* 0.08 Make sure that the watchdog is only "armed" when started.
|
||||
* (Kernel Bug 4251)
|
||||
* 20060416 Wim Van Sebroeck <wim@iguana.be>
|
||||
* 0.09 Remove support for the ICH6, ICH6R, ICH6-M, ICH6W and ICH6RW and
|
||||
* ICH7 chipsets. (See Kernel Bug 6031 - other code will support these
|
||||
* chipsets)
|
||||
*/
|
||||
|
||||
/*
|
||||
|
@ -90,7 +89,7 @@
|
|||
#include "i8xx_tco.h"
|
||||
|
||||
/* Module and version information */
|
||||
#define TCO_VERSION "0.08"
|
||||
#define TCO_VERSION "0.09"
|
||||
#define TCO_MODULE_NAME "i8xx TCO timer"
|
||||
#define TCO_DRIVER_NAME TCO_MODULE_NAME ", v" TCO_VERSION
|
||||
#define PFX TCO_MODULE_NAME ": "
|
||||
|
@ -391,11 +390,6 @@ static struct pci_device_id i8xx_tco_pci_tbl[] = {
|
|||
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, PCI_ANY_ID, PCI_ANY_ID, },
|
||||
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801E_0, PCI_ANY_ID, PCI_ANY_ID, },
|
||||
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, PCI_ANY_ID, PCI_ANY_ID, },
|
||||
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_0, PCI_ANY_ID, PCI_ANY_ID, },
|
||||
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, PCI_ANY_ID, PCI_ANY_ID, },
|
||||
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_2, PCI_ANY_ID, PCI_ANY_ID, },
|
||||
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0, PCI_ANY_ID, PCI_ANY_ID, },
|
||||
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_1, PCI_ANY_ID, PCI_ANY_ID, },
|
||||
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1, PCI_ANY_ID, PCI_ANY_ID, },
|
||||
{ 0, }, /* End of list */
|
||||
};
|
||||
|
|
|
@ -423,6 +423,12 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
|
|||
if (tmr_atboot && started == 0) {
|
||||
printk(KERN_INFO PFX "Starting Watchdog Timer\n");
|
||||
s3c2410wdt_start();
|
||||
} else if (!tmr_atboot) {
|
||||
/* if we're not enabling the watchdog, then ensure it is
|
||||
* disabled if it has been left running from the bootloader
|
||||
* or other source */
|
||||
|
||||
s3c2410wdt_stop();
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
|
|
@ -377,7 +377,7 @@ static int __init sc1200wdt_init(void)
|
|||
{
|
||||
int ret;
|
||||
|
||||
printk(banner);
|
||||
printk("%s\n", banner);
|
||||
|
||||
spin_lock_init(&sc1200wdt_lock);
|
||||
sema_init(&open_sem, 1);
|
||||
|
|
|
@ -133,6 +133,9 @@ static void scx200_acb_machine(struct scx200_acb_iface *iface, u8 status)
|
|||
|
||||
outb(inb(ACBCTL1) | ACBCTL1_STOP, ACBCTL1);
|
||||
outb(ACBST_STASTR | ACBST_NEGACK, ACBST);
|
||||
|
||||
/* Reset the status register */
|
||||
outb(0, ACBST);
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -228,6 +231,10 @@ static void scx200_acb_poll(struct scx200_acb_iface *iface)
|
|||
timeout = jiffies + POLL_TIMEOUT;
|
||||
while (time_before(jiffies, timeout)) {
|
||||
status = inb(ACBST);
|
||||
|
||||
/* Reset the status register to avoid the hang */
|
||||
outb(0, ACBST);
|
||||
|
||||
if ((status & (ACBST_SDAST|ACBST_BER|ACBST_NEGACK)) != 0) {
|
||||
scx200_acb_machine(iface, status);
|
||||
return;
|
||||
|
@ -415,7 +422,6 @@ static int __init scx200_acb_create(const char *text, int base, int index)
|
|||
struct scx200_acb_iface *iface;
|
||||
struct i2c_adapter *adapter;
|
||||
int rc;
|
||||
char description[64];
|
||||
|
||||
iface = kzalloc(sizeof(*iface), GFP_KERNEL);
|
||||
if (!iface) {
|
||||
|
@ -434,10 +440,7 @@ static int __init scx200_acb_create(const char *text, int base, int index)
|
|||
|
||||
mutex_init(&iface->mutex);
|
||||
|
||||
snprintf(description, sizeof(description), "%s ACCESS.bus [%s]",
|
||||
text, adapter->name);
|
||||
|
||||
if (request_region(base, 8, description) == 0) {
|
||||
if (!request_region(base, 8, adapter->name)) {
|
||||
printk(KERN_ERR NAME ": can't allocate io 0x%x-0x%x\n",
|
||||
base, base + 8-1);
|
||||
rc = -EBUSY;
|
||||
|
@ -524,6 +527,9 @@ static int __init scx200_acb_init(void)
|
|||
} else if (pci_dev_present(divil_pci))
|
||||
rc = scx200_add_cs553x();
|
||||
|
||||
/* If at least one bus was created, init must succeed */
|
||||
if (scx200_acb_list)
|
||||
return 0;
|
||||
return rc;
|
||||
}
|
||||
|
||||
|
|
|
@ -392,6 +392,7 @@ static struct pcmcia_device_id ide_ids[] = {
|
|||
PCMCIA_DEVICE_PROD_ID12("FREECOM", "PCCARD-IDE", 0x5714cbf7, 0x48e0ab8e),
|
||||
PCMCIA_DEVICE_PROD_ID12("HITACHI", "FLASH", 0xf4f43949, 0x9eb86aae),
|
||||
PCMCIA_DEVICE_PROD_ID12("HITACHI", "microdrive", 0xf4f43949, 0xa6d76178),
|
||||
PCMCIA_DEVICE_PROD_ID12("IBM", "microdrive", 0xb569a6e5, 0xa6d76178),
|
||||
PCMCIA_DEVICE_PROD_ID12("IBM", "IBM17JSSFP20", 0xb569a6e5, 0xf2508753),
|
||||
PCMCIA_DEVICE_PROD_ID12("IO DATA", "CBIDE2 ", 0x547e66dc, 0x8671043b),
|
||||
PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDE", 0x547e66dc, 0x5c5ab149),
|
||||
|
|
|
@ -553,6 +553,8 @@ pmac_ide_init_hwif_ports(hw_regs_t *hw,
|
|||
|
||||
if (irq != NULL)
|
||||
*irq = pmac_ide[ix].irq;
|
||||
|
||||
hw->dev = &pmac_ide[ix].mdev->ofdev.dev;
|
||||
}
|
||||
|
||||
#define PMAC_IDE_REG(x) ((void __iomem *)(IDE_DATA_REG+(x)))
|
||||
|
|
|
@ -553,7 +553,7 @@ static void ohci_initialize(struct ti_ohci *ohci)
|
|||
* register content.
|
||||
* To actually enable physical responses is the job of our interrupt
|
||||
* handler which programs the physical request filter. */
|
||||
reg_write(ohci, OHCI1394_PhyUpperBound, 0xffff0000);
|
||||
reg_write(ohci, OHCI1394_PhyUpperBound, 0x01000000);
|
||||
|
||||
DBGMSG("physUpperBoundOffset=%08x",
|
||||
reg_read(ohci, OHCI1394_PhyUpperBound));
|
||||
|
|
|
@ -42,6 +42,7 @@
|
|||
#include <linux/kernel.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/string.h>
|
||||
#include <linux/stringify.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/fs.h>
|
||||
|
@ -117,7 +118,8 @@ MODULE_PARM_DESC(serialize_io, "Serialize I/O coming from scsi drivers (default
|
|||
*/
|
||||
static int max_sectors = SBP2_MAX_SECTORS;
|
||||
module_param(max_sectors, int, 0444);
|
||||
MODULE_PARM_DESC(max_sectors, "Change max sectors per I/O supported (default = 255)");
|
||||
MODULE_PARM_DESC(max_sectors, "Change max sectors per I/O supported (default = "
|
||||
__stringify(SBP2_MAX_SECTORS) ")");
|
||||
|
||||
/*
|
||||
* Exclusive login to sbp2 device? In most cases, the sbp2 driver should
|
||||
|
@ -135,18 +137,45 @@ module_param(exclusive_login, int, 0644);
|
|||
MODULE_PARM_DESC(exclusive_login, "Exclusive login to sbp2 device (default = 1)");
|
||||
|
||||
/*
|
||||
* SCSI inquiry hack for really badly behaved sbp2 devices. Turn this on
|
||||
* if your sbp2 device is not properly handling the SCSI inquiry command.
|
||||
* This hack makes the inquiry look more like a typical MS Windows inquiry
|
||||
* by enforcing 36 byte inquiry and avoiding access to mode_sense page 8.
|
||||
* If any of the following workarounds is required for your device to work,
|
||||
* please submit the kernel messages logged by sbp2 to the linux1394-devel
|
||||
* mailing list.
|
||||
*
|
||||
* If force_inquiry_hack=1 is required for your device to work,
|
||||
* please submit the logged sbp2_firmware_revision value of this device to
|
||||
* the linux1394-devel mailing list.
|
||||
* - 128kB max transfer
|
||||
* Limit transfer size. Necessary for some old bridges.
|
||||
*
|
||||
* - 36 byte inquiry
|
||||
* When scsi_mod probes the device, let the inquiry command look like that
|
||||
* from MS Windows.
|
||||
*
|
||||
* - skip mode page 8
|
||||
* Suppress sending of mode_sense for mode page 8 if the device pretends to
|
||||
* support the SCSI Primary Block commands instead of Reduced Block Commands.
|
||||
*
|
||||
* - fix capacity
|
||||
* Tell sd_mod to correct the last sector number reported by read_capacity.
|
||||
* Avoids access beyond actual disk limits on devices with an off-by-one bug.
|
||||
* Don't use this with devices which don't have this bug.
|
||||
*
|
||||
* - override internal blacklist
|
||||
* Instead of adding to the built-in blacklist, use only the workarounds
|
||||
* specified in the module load parameter.
|
||||
* Useful if a blacklist entry interfered with a non-broken device.
|
||||
*/
|
||||
static int sbp2_default_workarounds;
|
||||
module_param_named(workarounds, sbp2_default_workarounds, int, 0644);
|
||||
MODULE_PARM_DESC(workarounds, "Work around device bugs (default = 0"
|
||||
", 128kB max transfer = " __stringify(SBP2_WORKAROUND_128K_MAX_TRANS)
|
||||
", 36 byte inquiry = " __stringify(SBP2_WORKAROUND_INQUIRY_36)
|
||||
", skip mode page 8 = " __stringify(SBP2_WORKAROUND_MODE_SENSE_8)
|
||||
", fix capacity = " __stringify(SBP2_WORKAROUND_FIX_CAPACITY)
|
||||
", override internal blacklist = " __stringify(SBP2_WORKAROUND_OVERRIDE)
|
||||
", or a combination)");
|
||||
|
||||
/* legacy parameter */
|
||||
static int force_inquiry_hack;
|
||||
module_param(force_inquiry_hack, int, 0644);
|
||||
MODULE_PARM_DESC(force_inquiry_hack, "Force SCSI inquiry hack (default = 0)");
|
||||
MODULE_PARM_DESC(force_inquiry_hack, "Deprecated, use 'workarounds'");
|
||||
|
||||
/*
|
||||
* Export information about protocols/devices supported by this driver.
|
||||
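The old single-purpose inquiry hack above is folded into one bit-mask parameter, so several quirks can be enabled at once and tested individually. A hedged sketch of how such a mask is combined (the flag values mirror the ones this diff adds to sbp2.h; example_effective_workarounds() is illustrative, not a function in the driver):

#define SBP2_WORKAROUND_128K_MAX_TRANS	0x1
#define SBP2_WORKAROUND_INQUIRY_36	0x2
#define SBP2_WORKAROUND_MODE_SENSE_8	0x4
#define SBP2_WORKAROUND_FIX_CAPACITY	0x8
#define SBP2_WORKAROUND_OVERRIDE	0x100

/* Start from the module-wide default; unless the user asked to override
 * the built-in blacklist, OR in the per-device entry from the table. */
static unsigned example_effective_workarounds(unsigned module_default,
					      unsigned table_entry)
{
	unsigned workarounds = module_default;

	if (!(workarounds & SBP2_WORKAROUND_OVERRIDE))
		workarounds |= table_entry;
	return workarounds;
}

Loading with, say, workarounds=0x6 would then request the 36 byte inquiry and the skip-mode-page-8 quirks together (0x2 | 0x4).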
|
@ -266,14 +295,55 @@ static struct hpsb_protocol_driver sbp2_driver = {
|
|||
};
|
||||
|
||||
/*
|
||||
* List of device firmwares that require the inquiry hack.
|
||||
* Yields a few false positives but did not break other devices so far.
|
||||
* List of devices with known bugs.
|
||||
*
|
||||
* The firmware_revision field, masked with 0xffff00, is the best indicator
|
||||
* for the type of bridge chip of a device. It yields a few false positives
|
||||
* but this did not break correctly behaving devices so far.
|
||||
*/
|
||||
static u32 sbp2_broken_inquiry_list[] = {
|
||||
0x00002800, /* Stefan Richter <stefanr@s5r6.in-berlin.de> */
|
||||
/* DViCO Momobay CX-1 */
|
||||
0x00000200 /* Andreas Plesch <plesch@fas.harvard.edu> */
|
||||
/* QPS Fire DVDBurner */
|
||||
static const struct {
|
||||
u32 firmware_revision;
|
||||
u32 model_id;
|
||||
unsigned workarounds;
|
||||
} sbp2_workarounds_table[] = {
|
||||
/* TSB42AA9 */ {
|
||||
.firmware_revision = 0x002800,
|
||||
.workarounds = SBP2_WORKAROUND_INQUIRY_36 |
|
||||
SBP2_WORKAROUND_MODE_SENSE_8,
|
||||
},
|
||||
/* Initio bridges, actually only needed for some older ones */ {
|
||||
.firmware_revision = 0x000200,
|
||||
.workarounds = SBP2_WORKAROUND_INQUIRY_36,
|
||||
},
|
||||
/* Symbios bridge */ {
|
||||
.firmware_revision = 0xa0b800,
|
||||
.workarounds = SBP2_WORKAROUND_128K_MAX_TRANS,
|
||||
},
|
||||
/*
|
||||
* Note about the following Apple iPod blacklist entries:
|
||||
*
|
||||
* There are iPods (2nd gen, 3rd gen) with model_id==0. Since our
|
||||
* matching logic treats 0 as a wildcard, we cannot match this ID
|
||||
* without rewriting the matching routine. Fortunately these iPods
|
||||
* do not feature the read_capacity bug according to one report.
|
||||
* Read_capacity behaviour as well as model_id could change due to
|
||||
* Apple-supplied firmware updates though.
|
||||
*/
|
||||
/* iPod 4th generation */ {
|
||||
.firmware_revision = 0x0a2700,
|
||||
.model_id = 0x000021,
|
||||
.workarounds = SBP2_WORKAROUND_FIX_CAPACITY,
|
||||
},
|
||||
/* iPod mini */ {
|
||||
.firmware_revision = 0x0a2700,
|
||||
.model_id = 0x000023,
|
||||
.workarounds = SBP2_WORKAROUND_FIX_CAPACITY,
|
||||
},
|
||||
/* iPod Photo */ {
|
||||
.firmware_revision = 0x0a2700,
|
||||
.model_id = 0x00007e,
|
||||
.workarounds = SBP2_WORKAROUND_FIX_CAPACITY,
|
||||
}
|
||||
};
|
||||
|
||||
/**************************************
|
||||
|
@ -765,11 +835,16 @@ static struct scsi_id_instance_data *sbp2_alloc_device(struct unit_directory *ud
|
|||
|
||||
/* Register the status FIFO address range. We could use the same FIFO
|
||||
* for targets at different nodes. However we need different FIFOs per
|
||||
* target in order to support multi-unit devices. */
|
||||
* target in order to support multi-unit devices.
|
||||
* The FIFO is located out of the local host controller's physical range
|
||||
* but, if possible, within the posted write area. Status writes will
|
||||
* then be performed as unified transactions. This slightly reduces
|
||||
* bandwidth usage, and some Prolific based devices seem to require it.
|
||||
*/
|
||||
scsi_id->status_fifo_addr = hpsb_allocate_and_register_addrspace(
|
||||
&sbp2_highlevel, ud->ne->host, &sbp2_ops,
|
||||
sizeof(struct sbp2_status_block), sizeof(quadlet_t),
|
||||
~0ULL, ~0ULL);
|
||||
0x010000000000ULL, CSR1212_ALL_SPACE_END);
|
||||
if (!scsi_id->status_fifo_addr) {
|
||||
SBP2_ERR("failed to allocate status FIFO address range");
|
||||
goto failed_alloc;
|
||||
|
@ -1450,7 +1525,8 @@ static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id,
|
|||
struct csr1212_dentry *dentry;
|
||||
u64 management_agent_addr;
|
||||
u32 command_set_spec_id, command_set, unit_characteristics,
|
||||
firmware_revision, workarounds;
|
||||
firmware_revision;
|
||||
unsigned workarounds;
|
||||
int i;
|
||||
|
||||
SBP2_DEBUG_ENTER();
|
||||
|
@ -1506,12 +1582,8 @@ static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id,
|
|||
case SBP2_FIRMWARE_REVISION_KEY:
|
||||
/* Firmware revision */
|
||||
firmware_revision = kv->value.immediate;
|
||||
if (force_inquiry_hack)
|
||||
SBP2_INFO("sbp2_firmware_revision = %x",
|
||||
(unsigned int)firmware_revision);
|
||||
else
|
||||
SBP2_DEBUG("sbp2_firmware_revision = %x",
|
||||
(unsigned int)firmware_revision);
|
||||
SBP2_DEBUG("sbp2_firmware_revision = %x",
|
||||
(unsigned int)firmware_revision);
|
||||
break;
|
||||
|
||||
default:
|
||||
|
@ -1519,41 +1591,44 @@ static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id,
|
|||
}
|
||||
}
|
||||
|
||||
/* This is the start of our broken device checking. We try to hack
|
||||
* around oddities and known defects. */
|
||||
workarounds = 0x0;
|
||||
|
||||
/* If the vendor id is 0xa0b8 (Symbios vendor id), then we have a
|
||||
* bridge with 128KB max transfer size limitation. For sanity, we
|
||||
* only voice this when the current max_sectors setting
|
||||
* exceeds the 128k limit. By default, that is not the case.
|
||||
*
|
||||
* It would be really nice if we could detect this before the scsi
|
||||
* host gets initialized. That way we can down-force the
|
||||
* max_sectors to account for it. That is not currently
|
||||
* possible. */
|
||||
if ((firmware_revision & 0xffff00) ==
|
||||
SBP2_128KB_BROKEN_FIRMWARE &&
|
||||
(max_sectors * 512) > (128*1024)) {
|
||||
SBP2_WARN("Node " NODE_BUS_FMT ": Bridge only supports 128KB max transfer size.",
|
||||
NODE_BUS_ARGS(ud->ne->host, ud->ne->nodeid));
|
||||
SBP2_WARN("WARNING: Current max_sectors setting is larger than 128KB (%d sectors)!",
|
||||
max_sectors);
|
||||
workarounds |= SBP2_BREAKAGE_128K_MAX_TRANSFER;
|
||||
workarounds = sbp2_default_workarounds;
|
||||
if (force_inquiry_hack) {
|
||||
SBP2_WARN("force_inquiry_hack is deprecated. "
|
||||
"Use parameter 'workarounds' instead.");
|
||||
workarounds |= SBP2_WORKAROUND_INQUIRY_36;
|
||||
}
|
||||
|
||||
/* Check for a blacklisted set of devices that require us to force
|
||||
* a 36 byte host inquiry. This can be overriden as a module param
|
||||
* (to force all hosts). */
|
||||
for (i = 0; i < ARRAY_SIZE(sbp2_broken_inquiry_list); i++) {
|
||||
if ((firmware_revision & 0xffff00) ==
|
||||
sbp2_broken_inquiry_list[i]) {
|
||||
SBP2_WARN("Node " NODE_BUS_FMT ": Using 36byte inquiry workaround",
|
||||
NODE_BUS_ARGS(ud->ne->host, ud->ne->nodeid));
|
||||
workarounds |= SBP2_BREAKAGE_INQUIRY_HACK;
|
||||
break; /* No need to continue. */
|
||||
if (!(workarounds & SBP2_WORKAROUND_OVERRIDE))
|
||||
for (i = 0; i < ARRAY_SIZE(sbp2_workarounds_table); i++) {
|
||||
if (sbp2_workarounds_table[i].firmware_revision &&
|
||||
sbp2_workarounds_table[i].firmware_revision !=
|
||||
(firmware_revision & 0xffff00))
|
||||
continue;
|
||||
if (sbp2_workarounds_table[i].model_id &&
|
||||
sbp2_workarounds_table[i].model_id != ud->model_id)
|
||||
continue;
|
||||
workarounds |= sbp2_workarounds_table[i].workarounds;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (workarounds)
|
||||
SBP2_INFO("Workarounds for node " NODE_BUS_FMT ": 0x%x "
|
||||
"(firmware_revision 0x%06x, vendor_id 0x%06x,"
|
||||
" model_id 0x%06x)",
|
||||
NODE_BUS_ARGS(ud->ne->host, ud->ne->nodeid),
|
||||
workarounds, firmware_revision,
|
||||
ud->vendor_id ? ud->vendor_id : ud->ne->vendor_id,
|
||||
ud->model_id);
|
||||
|
||||
/* We would need one SCSI host template for each target to adjust
|
||||
* max_sectors on the fly, therefore warn only. */
|
||||
if (workarounds & SBP2_WORKAROUND_128K_MAX_TRANS &&
|
||||
(max_sectors * 512) > (128 * 1024))
|
||||
SBP2_WARN("Node " NODE_BUS_FMT ": Bridge only supports 128KB "
|
||||
"max transfer size. WARNING: Current max_sectors "
|
||||
"setting is larger than 128KB (%d sectors)",
|
||||
NODE_BUS_ARGS(ud->ne->host, ud->ne->nodeid),
|
||||
max_sectors);
|
||||
|
||||
/* If this is a logical unit directory entry, process the parent
|
||||
* to get the values. */
|
||||
|
@ -2447,19 +2522,25 @@ static int sbp2scsi_slave_alloc(struct scsi_device *sdev)
|
|||
|
||||
scsi_id->sdev = sdev;
|
||||
|
||||
if (force_inquiry_hack ||
|
||||
scsi_id->workarounds & SBP2_BREAKAGE_INQUIRY_HACK) {
|
||||
if (scsi_id->workarounds & SBP2_WORKAROUND_INQUIRY_36)
|
||||
sdev->inquiry_len = 36;
|
||||
sdev->skip_ms_page_8 = 1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int sbp2scsi_slave_configure(struct scsi_device *sdev)
|
||||
{
|
||||
struct scsi_id_instance_data *scsi_id =
|
||||
(struct scsi_id_instance_data *)sdev->host->hostdata[0];
|
||||
|
||||
blk_queue_dma_alignment(sdev->request_queue, (512 - 1));
|
||||
sdev->use_10_for_rw = 1;
|
||||
sdev->use_10_for_ms = 1;
|
||||
|
||||
if (sdev->type == TYPE_DISK &&
|
||||
scsi_id->workarounds & SBP2_WORKAROUND_MODE_SENSE_8)
|
||||
sdev->skip_ms_page_8 = 1;
|
||||
if (scsi_id->workarounds & SBP2_WORKAROUND_FIX_CAPACITY)
|
||||
sdev->fix_capacity = 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -2603,7 +2684,9 @@ static int sbp2_module_init(void)
|
|||
scsi_driver_template.cmd_per_lun = 1;
|
||||
}
|
||||
|
||||
/* Set max sectors (module load option). Default is 255 sectors. */
|
||||
if (sbp2_default_workarounds & SBP2_WORKAROUND_128K_MAX_TRANS &&
|
||||
(max_sectors * 512) > (128 * 1024))
|
||||
max_sectors = 128 * 1024 / 512;
|
||||
scsi_driver_template.max_sectors = max_sectors;
|
||||
|
||||
/* Register our high level driver with 1394 stack */
|
||||
|
|
|
@ -226,11 +226,6 @@ struct sbp2_status_block {
|
|||
#define SBP2_UNIT_SPEC_ID_ENTRY 0x0000609e
|
||||
#define SBP2_SW_VERSION_ENTRY 0x00010483
|
||||
|
||||
/*
|
||||
* Other misc defines
|
||||
*/
|
||||
#define SBP2_128KB_BROKEN_FIRMWARE 0xa0b800
|
||||
|
||||
/*
|
||||
* SCSI specific stuff
|
||||
*/
|
||||
|
@ -239,6 +234,13 @@ struct sbp2_status_block {
|
|||
#define SBP2_MAX_SECTORS 255 /* Max sectors supported */
|
||||
#define SBP2_MAX_CMDS 8 /* This should be safe */
|
||||
|
||||
/* Flags for detected oddities and brokeness */
|
||||
#define SBP2_WORKAROUND_128K_MAX_TRANS 0x1
|
||||
#define SBP2_WORKAROUND_INQUIRY_36 0x2
|
||||
#define SBP2_WORKAROUND_MODE_SENSE_8 0x4
|
||||
#define SBP2_WORKAROUND_FIX_CAPACITY 0x8
|
||||
#define SBP2_WORKAROUND_OVERRIDE 0x100
|
||||
|
||||
/* This is the two dma types we use for cmd_dma below */
|
||||
enum cmd_dma_types {
|
||||
CMD_DMA_NONE,
|
||||
|
@ -268,10 +270,6 @@ struct sbp2_command_info {
|
|||
|
||||
};
|
||||
|
||||
/* A list of flags for detected oddities and brokeness. */
|
||||
#define SBP2_BREAKAGE_128K_MAX_TRANSFER 0x1
|
||||
#define SBP2_BREAKAGE_INQUIRY_HACK 0x2
|
||||
|
||||
struct sbp2scsi_host_info;
|
||||
|
||||
/*
|
||||
|
@ -345,7 +343,7 @@ struct scsi_id_instance_data {
|
|||
struct Scsi_Host *scsi_host;
|
||||
|
||||
/* Device specific workarounds/brokeness */
|
||||
u32 workarounds;
|
||||
unsigned workarounds;
|
||||
};
|
||||
|
||||
/* Sbp2 host data structure (one per IEEE1394 host) */
|
||||
|
|
|
@ -34,6 +34,8 @@
|
|||
*
|
||||
* $Id: cm.c 2821 2005-07-08 17:07:28Z sean.hefty $
|
||||
*/
|
||||
|
||||
#include <linux/completion.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/idr.h>
|
||||
|
@ -122,7 +124,7 @@ struct cm_id_private {
|
|||
struct rb_node service_node;
|
||||
struct rb_node sidr_id_node;
|
||||
spinlock_t lock; /* Do not acquire inside cm.lock */
|
||||
wait_queue_head_t wait;
|
||||
struct completion comp;
|
||||
atomic_t refcount;
|
||||
|
||||
struct ib_mad_send_buf *msg;
|
||||
|
@ -159,7 +161,7 @@ static void cm_work_handler(void *data);
|
|||
static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
|
||||
{
|
||||
if (atomic_dec_and_test(&cm_id_priv->refcount))
|
||||
wake_up(&cm_id_priv->wait);
|
||||
complete(&cm_id_priv->comp);
|
||||
}
|
||||
|
||||
static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
|
||||
|
@ -559,7 +561,7 @@ struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
|
|||
goto error;
|
||||
|
||||
spin_lock_init(&cm_id_priv->lock);
|
||||
init_waitqueue_head(&cm_id_priv->wait);
|
||||
init_completion(&cm_id_priv->comp);
|
||||
INIT_LIST_HEAD(&cm_id_priv->work_list);
|
||||
atomic_set(&cm_id_priv->work_count, -1);
|
||||
atomic_set(&cm_id_priv->refcount, 1);
|
||||
|
@ -724,8 +726,8 @@ retest:
|
|||
}
|
||||
|
||||
cm_free_id(cm_id->local_id);
|
||||
atomic_dec(&cm_id_priv->refcount);
|
||||
wait_event(cm_id_priv->wait, !atomic_read(&cm_id_priv->refcount));
|
||||
cm_deref_id(cm_id_priv);
|
||||
wait_for_completion(&cm_id_priv->comp);
|
||||
while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
|
||||
cm_free_work(work);
|
||||
if (cm_id_priv->private_data && cm_id_priv->private_data_len)
|
||||
|
|
|
@ -352,7 +352,7 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
|
|||
INIT_WORK(&mad_agent_priv->local_work, local_completions,
|
||||
mad_agent_priv);
|
||||
atomic_set(&mad_agent_priv->refcount, 1);
|
||||
init_waitqueue_head(&mad_agent_priv->wait);
|
||||
init_completion(&mad_agent_priv->comp);
|
||||
|
||||
return &mad_agent_priv->agent;
|
||||
|
||||
|
@ -467,7 +467,7 @@ struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
|
|||
mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
|
||||
mad_snoop_priv->agent.port_num = port_num;
|
||||
mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
|
||||
init_waitqueue_head(&mad_snoop_priv->wait);
|
||||
init_completion(&mad_snoop_priv->comp);
|
||||
mad_snoop_priv->snoop_index = register_snoop_agent(
|
||||
&port_priv->qp_info[qpn],
|
||||
mad_snoop_priv);
|
||||
|
@ -486,6 +486,18 @@ error1:
|
|||
}
|
||||
EXPORT_SYMBOL(ib_register_mad_snoop);
|
||||
|
||||
static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
|
||||
{
|
||||
if (atomic_dec_and_test(&mad_agent_priv->refcount))
|
||||
complete(&mad_agent_priv->comp);
|
||||
}
|
||||
|
||||
static inline void deref_snoop_agent(struct ib_mad_snoop_private *mad_snoop_priv)
|
||||
{
|
||||
if (atomic_dec_and_test(&mad_snoop_priv->refcount))
|
||||
complete(&mad_snoop_priv->comp);
|
||||
}
|
||||
|
||||
static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
|
||||
{
|
||||
struct ib_mad_port_private *port_priv;
|
||||
|
@ -509,9 +521,8 @@ static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
|
|||
flush_workqueue(port_priv->wq);
|
||||
ib_cancel_rmpp_recvs(mad_agent_priv);
|
||||
|
||||
atomic_dec(&mad_agent_priv->refcount);
|
||||
wait_event(mad_agent_priv->wait,
|
||||
!atomic_read(&mad_agent_priv->refcount));
|
||||
deref_mad_agent(mad_agent_priv);
|
||||
wait_for_completion(&mad_agent_priv->comp);
|
||||
|
||||
kfree(mad_agent_priv->reg_req);
|
||||
ib_dereg_mr(mad_agent_priv->agent.mr);
|
||||
|
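Many hunks in this section apply the same conversion: the last reference no longer wakes a wait queue embedded in the object, it signals a struct completion, and the destroy path blocks in wait_for_completion(). A hedged, self-contained sketch of that pattern (struct example_obj and the example_* functions are illustrative, not taken from the tree):

#include <linux/completion.h>
#include <linux/slab.h>
#include <asm/atomic.h>		/* atomic_t helpers in this kernel era */

struct example_obj {			/* hypothetical refcounted object */
	atomic_t refcount;
	struct completion comp;
};

static struct example_obj *example_create(void)
{
	struct example_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return NULL;
	atomic_set(&obj->refcount, 1);	/* creator holds one reference */
	init_completion(&obj->comp);
	return obj;
}

static void example_deref(struct example_obj *obj)
{
	/* The final put signals the completion instead of waking a waitqueue. */
	if (atomic_dec_and_test(&obj->refcount))
		complete(&obj->comp);
}

static void example_destroy(struct example_obj *obj)
{
	example_deref(obj);		 /* drop the creator's reference */
	wait_for_completion(&obj->comp); /* sleep until every user has let go */
	kfree(obj);
}

The diff applies this conversion to the CM id, MAD agent, MAD snoop, RMPP receive, and ucm context objects in the surrounding hunks.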
@ -529,9 +540,8 @@ static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
|
|||
atomic_dec(&qp_info->snoop_count);
|
||||
spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
|
||||
|
||||
atomic_dec(&mad_snoop_priv->refcount);
|
||||
wait_event(mad_snoop_priv->wait,
|
||||
!atomic_read(&mad_snoop_priv->refcount));
|
||||
deref_snoop_agent(mad_snoop_priv);
|
||||
wait_for_completion(&mad_snoop_priv->comp);
|
||||
|
||||
kfree(mad_snoop_priv);
|
||||
}
|
||||
|
@ -600,8 +610,7 @@ static void snoop_send(struct ib_mad_qp_info *qp_info,
|
|||
spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
|
||||
mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
|
||||
send_buf, mad_send_wc);
|
||||
if (atomic_dec_and_test(&mad_snoop_priv->refcount))
|
||||
wake_up(&mad_snoop_priv->wait);
|
||||
deref_snoop_agent(mad_snoop_priv);
|
||||
spin_lock_irqsave(&qp_info->snoop_lock, flags);
|
||||
}
|
||||
spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
|
||||
|
@ -626,8 +635,7 @@ static void snoop_recv(struct ib_mad_qp_info *qp_info,
|
|||
spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
|
||||
mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent,
|
||||
mad_recv_wc);
|
||||
if (atomic_dec_and_test(&mad_snoop_priv->refcount))
|
||||
wake_up(&mad_snoop_priv->wait);
|
||||
deref_snoop_agent(mad_snoop_priv);
|
||||
spin_lock_irqsave(&qp_info->snoop_lock, flags);
|
||||
}
|
||||
spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
|
||||
|
@ -968,8 +976,7 @@ void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
|
|||
|
||||
free_send_rmpp_list(mad_send_wr);
|
||||
kfree(send_buf->mad);
|
||||
if (atomic_dec_and_test(&mad_agent_priv->refcount))
|
||||
wake_up(&mad_agent_priv->wait);
|
||||
deref_mad_agent(mad_agent_priv);
|
||||
}
|
||||
EXPORT_SYMBOL(ib_free_send_mad);
|
||||
|
||||
|
@ -1757,8 +1764,7 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
|
|||
mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
|
||||
mad_recv_wc);
|
||||
if (!mad_recv_wc) {
|
||||
if (atomic_dec_and_test(&mad_agent_priv->refcount))
|
||||
wake_up(&mad_agent_priv->wait);
|
||||
deref_mad_agent(mad_agent_priv);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
@ -1770,8 +1776,7 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
|
|||
if (!mad_send_wr) {
|
||||
spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
|
||||
ib_free_recv_mad(mad_recv_wc);
|
||||
if (atomic_dec_and_test(&mad_agent_priv->refcount))
|
||||
wake_up(&mad_agent_priv->wait);
|
||||
deref_mad_agent(mad_agent_priv);
|
||||
return;
|
||||
}
|
||||
ib_mark_mad_done(mad_send_wr);
|
||||
|
@ -1790,8 +1795,7 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
|
|||
} else {
|
||||
mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
|
||||
mad_recv_wc);
|
||||
if (atomic_dec_and_test(&mad_agent_priv->refcount))
|
||||
wake_up(&mad_agent_priv->wait);
|
||||
deref_mad_agent(mad_agent_priv);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -2021,8 +2025,7 @@ void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
|
|||
mad_send_wc);
|
||||
|
||||
/* Release reference on agent taken when sending */
|
||||
if (atomic_dec_and_test(&mad_agent_priv->refcount))
|
||||
wake_up(&mad_agent_priv->wait);
|
||||
deref_mad_agent(mad_agent_priv);
|
||||
return;
|
||||
done:
|
||||
spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
|
||||
|
|
|
@ -37,6 +37,7 @@
|
|||
#ifndef __IB_MAD_PRIV_H__
|
||||
#define __IB_MAD_PRIV_H__
|
||||
|
||||
#include <linux/completion.h>
|
||||
#include <linux/pci.h>
|
||||
#include <linux/kthread.h>
|
||||
#include <linux/workqueue.h>
|
||||
|
@ -108,7 +109,7 @@ struct ib_mad_agent_private {
|
|||
struct list_head rmpp_list;
|
||||
|
||||
atomic_t refcount;
|
||||
wait_queue_head_t wait;
|
||||
struct completion comp;
|
||||
};
|
||||
|
||||
struct ib_mad_snoop_private {
|
||||
|
@ -117,7 +118,7 @@ struct ib_mad_snoop_private {
|
|||
int snoop_index;
|
||||
int mad_snoop_flags;
|
||||
atomic_t refcount;
|
||||
wait_queue_head_t wait;
|
||||
struct completion comp;
|
||||
};
|
||||
|
||||
struct ib_mad_send_wr_private {
|
||||
|
|
|
@ -49,7 +49,7 @@ struct mad_rmpp_recv {
|
|||
struct list_head list;
|
||||
struct work_struct timeout_work;
|
||||
struct work_struct cleanup_work;
|
||||
wait_queue_head_t wait;
|
||||
struct completion comp;
|
||||
enum rmpp_state state;
|
||||
spinlock_t lock;
|
||||
atomic_t refcount;
|
||||
|
@ -69,10 +69,16 @@ struct mad_rmpp_recv {
|
|||
u8 method;
|
||||
};
|
||||
|
||||
static inline void deref_rmpp_recv(struct mad_rmpp_recv *rmpp_recv)
|
||||
{
|
||||
if (atomic_dec_and_test(&rmpp_recv->refcount))
|
||||
complete(&rmpp_recv->comp);
|
||||
}
|
||||
|
||||
static void destroy_rmpp_recv(struct mad_rmpp_recv *rmpp_recv)
|
||||
{
|
||||
atomic_dec(&rmpp_recv->refcount);
|
||||
wait_event(rmpp_recv->wait, !atomic_read(&rmpp_recv->refcount));
|
||||
deref_rmpp_recv(rmpp_recv);
|
||||
wait_for_completion(&rmpp_recv->comp);
|
||||
ib_destroy_ah(rmpp_recv->ah);
|
||||
kfree(rmpp_recv);
|
||||
}
|
||||
|
@ -253,7 +259,7 @@ create_rmpp_recv(struct ib_mad_agent_private *agent,
|
|||
goto error;
|
||||
|
||||
rmpp_recv->agent = agent;
|
||||
init_waitqueue_head(&rmpp_recv->wait);
|
||||
init_completion(&rmpp_recv->comp);
|
||||
INIT_WORK(&rmpp_recv->timeout_work, recv_timeout_handler, rmpp_recv);
|
||||
INIT_WORK(&rmpp_recv->cleanup_work, recv_cleanup_handler, rmpp_recv);
|
||||
spin_lock_init(&rmpp_recv->lock);
|
||||
|
@ -279,12 +285,6 @@ error: kfree(rmpp_recv);
|
|||
return NULL;
|
||||
}
|
||||
|
||||
static inline void deref_rmpp_recv(struct mad_rmpp_recv *rmpp_recv)
|
||||
{
|
||||
if (atomic_dec_and_test(&rmpp_recv->refcount))
|
||||
wake_up(&rmpp_recv->wait);
|
||||
}
|
||||
|
||||
static struct mad_rmpp_recv *
|
||||
find_rmpp_recv(struct ib_mad_agent_private *agent,
|
||||
struct ib_mad_recv_wc *mad_recv_wc)
|
||||
|
|
|
@ -32,6 +32,8 @@
|
|||
*
|
||||
* $Id: ucm.c 2594 2005-06-13 19:46:02Z libor $
|
||||
*/
|
||||
|
||||
#include <linux/completion.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/module.h>
|
||||
|
@ -72,7 +74,7 @@ struct ib_ucm_file {
|
|||
|
||||
struct ib_ucm_context {
|
||||
int id;
|
||||
wait_queue_head_t wait;
|
||||
struct completion comp;
|
||||
atomic_t ref;
|
||||
int events_reported;
|
||||
|
||||
|
@ -138,7 +140,7 @@ static struct ib_ucm_context *ib_ucm_ctx_get(struct ib_ucm_file *file, int id)
|
|||
static void ib_ucm_ctx_put(struct ib_ucm_context *ctx)
|
||||
{
|
||||
if (atomic_dec_and_test(&ctx->ref))
|
||||
wake_up(&ctx->wait);
|
||||
complete(&ctx->comp);
|
||||
}
|
||||
|
||||
static inline int ib_ucm_new_cm_id(int event)
|
||||
|
@ -178,7 +180,7 @@ static struct ib_ucm_context *ib_ucm_ctx_alloc(struct ib_ucm_file *file)
|
|||
return NULL;
|
||||
|
||||
atomic_set(&ctx->ref, 1);
|
||||
init_waitqueue_head(&ctx->wait);
|
||||
init_completion(&ctx->comp);
|
||||
ctx->file = file;
|
||||
INIT_LIST_HEAD(&ctx->events);
|
||||
|
||||
|
@ -586,8 +588,8 @@ static ssize_t ib_ucm_destroy_id(struct ib_ucm_file *file,
|
|||
if (IS_ERR(ctx))
|
||||
return PTR_ERR(ctx);
|
||||
|
||||
atomic_dec(&ctx->ref);
|
||||
wait_event(ctx->wait, !atomic_read(&ctx->ref));
|
||||
ib_ucm_ctx_put(ctx);
|
||||
wait_for_completion(&ctx->comp);
|
||||
|
||||
/* No new events will be generated after destroying the cm_id. */
|
||||
ib_destroy_cm_id(ctx->cm_id);
|
||||
|
|
|
@ -211,8 +211,10 @@ void ib_umem_release_on_close(struct ib_device *dev, struct ib_umem *umem)
|
|||
*/
|
||||
|
||||
work = kmalloc(sizeof *work, GFP_KERNEL);
|
||||
if (!work)
|
||||
if (!work) {
|
||||
mmput(mm);
|
||||
return;
|
||||
}
|
||||
|
||||
INIT_WORK(&work->work, ib_umem_account, work);
|
||||
work->mm = mm;
|
||||
|
|
|
@ -116,10 +116,9 @@ static int __devinit ipath_init_one(struct pci_dev *,
|
|||
#define PCI_DEVICE_ID_INFINIPATH_PE800 0x10
|
||||
|
||||
static const struct pci_device_id ipath_pci_tbl[] = {
|
||||
{PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE,
|
||||
PCI_DEVICE_ID_INFINIPATH_HT)},
|
||||
{PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE,
|
||||
PCI_DEVICE_ID_INFINIPATH_PE800)},
|
||||
{ PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_INFINIPATH_HT) },
|
||||
{ PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_INFINIPATH_PE800) },
|
||||
{ 0, }
|
||||
};
|
||||
|
||||
MODULE_DEVICE_TABLE(pci, ipath_pci_tbl);
|
||||
|
@ -1906,19 +1905,19 @@ static void __exit infinipath_cleanup(void)
|
|||
} else
|
||||
ipath_dbg("irq is 0, not doing free_irq "
|
||||
"for unit %u\n", dd->ipath_unit);
|
||||
|
||||
/*
|
||||
* we check for NULL here, because it's outside
|
||||
* the kregbase check, and we need to call it
|
||||
* after the free_irq. Thus it's possible that
|
||||
* the function pointers were never initialized.
|
||||
*/
|
||||
if (dd->ipath_f_cleanup)
|
||||
/* clean up chip-specific stuff */
|
||||
dd->ipath_f_cleanup(dd);
|
||||
|
||||
dd->pcidev = NULL;
|
||||
}
|
||||
|
||||
/*
|
||||
* we check for NULL here, because it's outside the kregbase
|
||||
* check, and we need to call it after the free_irq. Thus
|
||||
* it's possible that the function pointers were never
|
||||
* initialized.
|
||||
*/
|
||||
if (dd->ipath_f_cleanup)
|
||||
/* clean up chip-specific stuff */
|
||||
dd->ipath_f_cleanup(dd);
|
||||
|
||||
spin_lock_irqsave(&ipath_devs_lock, flags);
|
||||
}
|
||||
|
||||
|
|
|
@ -505,11 +505,10 @@ static u8 flash_csum(struct ipath_flash *ifp, int adjust)
|
|||
* ipath_get_guid - get the GUID from the i2c device
|
||||
* @dd: the infinipath device
|
||||
*
|
||||
* When we add the multi-chip support, we will probably have to add
|
||||
* the ability to use the number of guids field, and get the guid from
|
||||
* the first chip's flash, to use for all of them.
|
||||
* We have the capability to use the ipath_nguid field, and get
|
||||
* the guid from the first chip's flash, to use for all of them.
|
||||
*/
|
||||
void ipath_get_guid(struct ipath_devdata *dd)
|
||||
void ipath_get_eeprom_info(struct ipath_devdata *dd)
|
||||
{
|
||||
void *buf;
|
||||
struct ipath_flash *ifp;
|
||||
|
|
|
@ -139,7 +139,7 @@ static int ipath_get_base_info(struct ipath_portdata *pd,
|
|||
kinfo->spi_piosize = dd->ipath_ibmaxlen;
|
||||
kinfo->spi_mtu = dd->ipath_ibmaxlen; /* maxlen, not ibmtu */
|
||||
kinfo->spi_port = pd->port_port;
|
||||
kinfo->spi_sw_version = IPATH_USER_SWVERSION;
|
||||
kinfo->spi_sw_version = IPATH_KERN_SWVERSION;
|
||||
kinfo->spi_hw_version = dd->ipath_revision;
|
||||
|
||||
if (copy_to_user(ubase, kinfo, sizeof(*kinfo)))
|
||||
|
@ -1224,6 +1224,10 @@ static unsigned int ipath_poll(struct file *fp,
|
|||
|
||||
if (tail == head) {
|
||||
set_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag);
|
||||
if(dd->ipath_rhdrhead_intr_off) /* arm rcv interrupt */
|
||||
(void)ipath_write_ureg(dd, ur_rcvhdrhead,
|
||||
dd->ipath_rhdrhead_intr_off
|
||||
| head, pd->port_port);
|
||||
poll_wait(fp, &pd->port_wait, pt);
|
||||
|
||||
if (test_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag)) {
|
||||
|
|
|
@ -607,7 +607,12 @@ static int ipath_ht_boardname(struct ipath_devdata *dd, char *name,
|
|||
case 4: /* Ponderosa is one of the bringup boards */
|
||||
n = "Ponderosa";
|
||||
break;
|
||||
case 5: /* HT-460 original production board */
|
||||
case 5:
|
||||
/*
|
||||
* HT-460 original production board; two production levels, with
|
||||
* different serial number ranges. See ipath_ht_early_init() for
|
||||
* case where we enable IPATH_GPIO_INTR for later serial # range.
|
||||
*/
|
||||
n = "InfiniPath_HT-460";
|
||||
break;
|
||||
case 6:
|
||||
|
@ -642,7 +647,7 @@ static int ipath_ht_boardname(struct ipath_devdata *dd, char *name,
|
|||
if (n)
|
||||
snprintf(name, namelen, "%s", n);
|
||||
|
||||
if (dd->ipath_majrev != 3 || dd->ipath_minrev != 2) {
|
||||
if (dd->ipath_majrev != 3 || (dd->ipath_minrev < 2 || dd->ipath_minrev > 3)) {
|
||||
/*
|
||||
* This version of the driver only supports the HT-400
|
||||
* Rev 3.2
|
||||
|
@ -1520,6 +1525,18 @@ static int ipath_ht_early_init(struct ipath_devdata *dd)
|
|||
*/
|
||||
ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
|
||||
INFINIPATH_S_ABORT);
|
||||
|
||||
ipath_get_eeprom_info(dd);
|
||||
if(dd->ipath_boardrev == 5 && dd->ipath_serial[0] == '1' &&
|
||||
dd->ipath_serial[1] == '2' && dd->ipath_serial[2] == '8') {
|
||||
/*
|
||||
* Later production HT-460 has same changes as HT-465, so
|
||||
* can use GPIO interrupts. They have serial #'s starting
|
||||
* with 128, rather than 112.
|
||||
*/
|
||||
dd->ipath_flags |= IPATH_GPIO_INTR;
|
||||
dd->ipath_flags &= ~IPATH_POLL_RX_INTR;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -879,7 +879,6 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
|
|||
|
||||
done:
|
||||
if (!ret) {
|
||||
ipath_get_guid(dd);
|
||||
*dd->ipath_statusp |= IPATH_STATUS_CHIP_PRESENT;
|
||||
if (!dd->ipath_f_intrsetup(dd)) {
|
||||
/* now we can enable all interrupts from the chip */
|
||||
|
|
|
@ -650,7 +650,7 @@ u32 __iomem *ipath_getpiobuf(struct ipath_devdata *, u32 *);
|
|||
void ipath_init_pe800_funcs(struct ipath_devdata *);
|
||||
/* init HT-400-specific func */
|
||||
void ipath_init_ht400_funcs(struct ipath_devdata *);
|
||||
void ipath_get_guid(struct ipath_devdata *);
|
||||
void ipath_get_eeprom_info(struct ipath_devdata *);
|
||||
u64 ipath_snap_cntr(struct ipath_devdata *, ipath_creg);
|
||||
|
||||
/*
|
||||
|
|
|
@ -136,9 +136,7 @@ int ipath_lkey_ok(struct ipath_lkey_table *rkt, struct ipath_sge *isge,
|
|||
ret = 1;
|
||||
goto bail;
|
||||
}
|
||||
spin_lock(&rkt->lock);
|
||||
mr = rkt->table[(sge->lkey >> (32 - ib_ipath_lkey_table_size))];
|
||||
spin_unlock(&rkt->lock);
|
||||
if (unlikely(mr == NULL || mr->lkey != sge->lkey)) {
|
||||
ret = 0;
|
||||
goto bail;
|
||||
|
@ -184,8 +182,6 @@ bail:
|
|||
* @acc: access flags
|
||||
*
|
||||
* Return 1 if successful, otherwise 0.
|
||||
*
|
||||
* The QP r_rq.lock should be held.
|
||||
*/
|
||||
int ipath_rkey_ok(struct ipath_ibdev *dev, struct ipath_sge_state *ss,
|
||||
u32 len, u64 vaddr, u32 rkey, int acc)
|
||||
|
@ -196,9 +192,7 @@ int ipath_rkey_ok(struct ipath_ibdev *dev, struct ipath_sge_state *ss,
|
|||
size_t off;
|
||||
int ret;
|
||||
|
||||
spin_lock(&rkt->lock);
|
||||
mr = rkt->table[(rkey >> (32 - ib_ipath_lkey_table_size))];
|
||||
spin_unlock(&rkt->lock);
|
||||
if (unlikely(mr == NULL || mr->lkey != rkey)) {
|
||||
ret = 0;
|
||||
goto bail;
|
||||
|
|
|
@ -872,12 +872,13 @@ static void copy_io(u32 __iomem *piobuf, struct ipath_sge_state *ss,
|
|||
update_sge(ss, len);
|
||||
length -= len;
|
||||
}
|
||||
/* Update address before sending packet. */
|
||||
update_sge(ss, length);
|
||||
/* must flush early everything before trigger word */
|
||||
ipath_flush_wc();
|
||||
__raw_writel(last, piobuf);
|
||||
/* be sure trigger word is written */
|
||||
ipath_flush_wc();
|
||||
update_sge(ss, length);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -943,17 +944,18 @@ int ipath_verbs_send(struct ipath_devdata *dd, u32 hdrwords,
|
|||
if (likely(ss->num_sge == 1 && len <= ss->sge.length &&
|
||||
!((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) {
|
||||
u32 w;
|
||||
u32 *addr = (u32 *) ss->sge.vaddr;
|
||||
|
||||
/* Update address before sending packet. */
|
||||
update_sge(ss, len);
|
||||
/* Need to round up for the last dword in the packet. */
|
||||
w = (len + 3) >> 2;
|
||||
__iowrite32_copy(piobuf, ss->sge.vaddr, w - 1);
|
||||
__iowrite32_copy(piobuf, addr, w - 1);
|
||||
/* must flush early everything before trigger word */
|
||||
ipath_flush_wc();
|
||||
__raw_writel(((u32 *) ss->sge.vaddr)[w - 1],
|
||||
piobuf + w - 1);
|
||||
__raw_writel(addr[w - 1], piobuf + w - 1);
|
||||
/* be sure trigger word is written */
|
||||
ipath_flush_wc();
|
||||
update_sge(ss, len);
|
||||
ret = 0;
|
||||
goto bail;
|
||||
}
|
||||
|
|
|
@ -1180,6 +1180,8 @@ static int ipath_pe_early_init(struct ipath_devdata *dd)
|
|||
*/
|
||||
dd->ipath_rhdrhead_intr_off = 1ULL<<32;
|
||||
|
||||
ipath_get_eeprom_info(dd);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -375,10 +375,10 @@ static void ipath_error_qp(struct ipath_qp *qp)
|
|||
|
||||
spin_lock(&dev->pending_lock);
|
||||
/* XXX What if its already removed by the timeout code? */
|
||||
if (qp->timerwait.next != LIST_POISON1)
|
||||
list_del(&qp->timerwait);
|
||||
if (qp->piowait.next != LIST_POISON1)
|
||||
list_del(&qp->piowait);
|
||||
if (!list_empty(&qp->timerwait))
|
||||
list_del_init(&qp->timerwait);
|
||||
if (!list_empty(&qp->piowait))
|
||||
list_del_init(&qp->piowait);
|
||||
spin_unlock(&dev->pending_lock);
|
||||
|
||||
wc.status = IB_WC_WR_FLUSH_ERR;
|
||||
|
@ -427,6 +427,7 @@ static void ipath_error_qp(struct ipath_qp *qp)
|
|||
int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
|
||||
int attr_mask)
|
||||
{
|
||||
struct ipath_ibdev *dev = to_idev(ibqp->device);
|
||||
struct ipath_qp *qp = to_iqp(ibqp);
|
||||
enum ib_qp_state cur_state, new_state;
|
||||
unsigned long flags;
|
||||
|
@ -443,6 +444,19 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
|
|||
attr_mask))
|
||||
goto inval;
|
||||
|
||||
if (attr_mask & IB_QP_AV)
|
||||
if (attr->ah_attr.dlid == 0 ||
|
||||
attr->ah_attr.dlid >= IPS_MULTICAST_LID_BASE)
|
||||
goto inval;
|
||||
|
||||
if (attr_mask & IB_QP_PKEY_INDEX)
|
||||
if (attr->pkey_index >= ipath_layer_get_npkeys(dev->dd))
|
||||
goto inval;
|
||||
|
||||
if (attr_mask & IB_QP_MIN_RNR_TIMER)
|
||||
if (attr->min_rnr_timer > 31)
|
||||
goto inval;
|
||||
|
||||
switch (new_state) {
|
||||
case IB_QPS_RESET:
|
||||
ipath_reset_qp(qp);
|
||||
|
@ -457,13 +471,8 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
|
|||
|
||||
}
|
||||
|
||||
if (attr_mask & IB_QP_PKEY_INDEX) {
|
||||
struct ipath_ibdev *dev = to_idev(ibqp->device);
|
||||
|
||||
if (attr->pkey_index >= ipath_layer_get_npkeys(dev->dd))
|
||||
goto inval;
|
||||
if (attr_mask & IB_QP_PKEY_INDEX)
|
||||
qp->s_pkey_index = attr->pkey_index;
|
||||
}
|
||||
|
||||
if (attr_mask & IB_QP_DEST_QPN)
|
||||
qp->remote_qpn = attr->dest_qp_num;
|
||||
|
@ -479,12 +488,8 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
|
|||
if (attr_mask & IB_QP_ACCESS_FLAGS)
|
||||
qp->qp_access_flags = attr->qp_access_flags;
|
||||
|
||||
if (attr_mask & IB_QP_AV) {
|
||||
if (attr->ah_attr.dlid == 0 ||
|
||||
attr->ah_attr.dlid >= IPS_MULTICAST_LID_BASE)
|
||||
goto inval;
|
||||
if (attr_mask & IB_QP_AV)
|
||||
qp->remote_ah_attr = attr->ah_attr;
|
||||
}
|
||||
|
||||
if (attr_mask & IB_QP_PATH_MTU)
|
||||
qp->path_mtu = attr->path_mtu;
|
||||
|
@ -499,11 +504,8 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
|
|||
qp->s_rnr_retry_cnt = qp->s_rnr_retry;
|
||||
}
|
||||
|
||||
if (attr_mask & IB_QP_MIN_RNR_TIMER) {
|
||||
if (attr->min_rnr_timer > 31)
|
||||
goto inval;
|
||||
if (attr_mask & IB_QP_MIN_RNR_TIMER)
|
||||
qp->s_min_rnr_timer = attr->min_rnr_timer;
|
||||
}
|
||||
|
||||
if (attr_mask & IB_QP_QKEY)
|
||||
qp->qkey = attr->qkey;
|
||||
|
@ -710,10 +712,8 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
|
|||
init_attr->qp_type == IB_QPT_RC ?
|
||||
ipath_do_rc_send : ipath_do_uc_send,
|
||||
(unsigned long)qp);
|
||||
qp->piowait.next = LIST_POISON1;
|
||||
qp->piowait.prev = LIST_POISON2;
|
||||
qp->timerwait.next = LIST_POISON1;
|
||||
qp->timerwait.prev = LIST_POISON2;
|
||||
INIT_LIST_HEAD(&qp->piowait);
|
||||
INIT_LIST_HEAD(&qp->timerwait);
|
||||
qp->state = IB_QPS_RESET;
|
||||
qp->s_wq = swq;
|
||||
qp->s_size = init_attr->cap.max_send_wr + 1;
|
||||
|
@ -734,7 +734,7 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
|
|||
ipath_reset_qp(qp);
|
||||
|
||||
/* Tell the core driver that the kernel SMA is present. */
|
||||
if (qp->ibqp.qp_type == IB_QPT_SMI)
|
||||
if (init_attr->qp_type == IB_QPT_SMI)
|
||||
ipath_layer_set_verbs_flags(dev->dd,
|
||||
IPATH_VERBS_KERNEL_SMA);
|
||||
break;
|
||||
|
@ -783,10 +783,10 @@ int ipath_destroy_qp(struct ib_qp *ibqp)
|
|||
|
||||
/* Make sure the QP isn't on the timeout list. */
|
||||
spin_lock_irqsave(&dev->pending_lock, flags);
|
||||
if (qp->timerwait.next != LIST_POISON1)
|
||||
list_del(&qp->timerwait);
|
||||
if (qp->piowait.next != LIST_POISON1)
|
||||
list_del(&qp->piowait);
|
||||
if (!list_empty(&qp->timerwait))
|
||||
list_del_init(&qp->timerwait);
|
||||
if (!list_empty(&qp->piowait))
|
||||
list_del_init(&qp->piowait);
|
||||
spin_unlock_irqrestore(&dev->pending_lock, flags);
|
||||
|
||||
/*
|
||||
|
@ -855,10 +855,10 @@ void ipath_sqerror_qp(struct ipath_qp *qp, struct ib_wc *wc)
|
|||
|
||||
spin_lock(&dev->pending_lock);
|
||||
/* XXX What if its already removed by the timeout code? */
|
||||
if (qp->timerwait.next != LIST_POISON1)
|
||||
list_del(&qp->timerwait);
|
||||
if (qp->piowait.next != LIST_POISON1)
|
||||
list_del(&qp->piowait);
|
||||
if (!list_empty(&qp->timerwait))
|
||||
list_del_init(&qp->timerwait);
|
||||
if (!list_empty(&qp->piowait))
|
||||
list_del_init(&qp->piowait);
|
||||
spin_unlock(&dev->pending_lock);
|
||||
|
||||
ipath_cq_enter(to_icq(qp->ibqp.send_cq), wc, 1);
|
||||
|
|
|
@ -57,7 +57,7 @@ static void ipath_init_restart(struct ipath_qp *qp, struct ipath_swqe *wqe)
|
|||
qp->s_len = wqe->length - len;
|
||||
dev = to_idev(qp->ibqp.device);
|
||||
spin_lock(&dev->pending_lock);
|
||||
if (qp->timerwait.next == LIST_POISON1)
|
||||
if (list_empty(&qp->timerwait))
|
||||
list_add_tail(&qp->timerwait,
|
||||
&dev->pending[dev->pending_index]);
|
||||
spin_unlock(&dev->pending_lock);
|
||||
|
@ -356,7 +356,7 @@ static inline int ipath_make_rc_req(struct ipath_qp *qp,
|
|||
if ((int)(qp->s_psn - qp->s_next_psn) > 0)
|
||||
qp->s_next_psn = qp->s_psn;
|
||||
spin_lock(&dev->pending_lock);
|
||||
if (qp->timerwait.next == LIST_POISON1)
|
||||
if (list_empty(&qp->timerwait))
|
||||
list_add_tail(&qp->timerwait,
|
||||
&dev->pending[dev->pending_index]);
|
||||
spin_unlock(&dev->pending_lock);
|
||||
|
@ -726,8 +726,8 @@ void ipath_restart_rc(struct ipath_qp *qp, u32 psn, struct ib_wc *wc)
|
|||
*/
|
||||
dev = to_idev(qp->ibqp.device);
|
||||
spin_lock(&dev->pending_lock);
|
||||
if (qp->timerwait.next != LIST_POISON1)
|
||||
list_del(&qp->timerwait);
|
||||
if (!list_empty(&qp->timerwait))
|
||||
list_del_init(&qp->timerwait);
|
||||
spin_unlock(&dev->pending_lock);
|
||||
|
||||
if (wqe->wr.opcode == IB_WR_RDMA_READ)
|
||||
|
@ -886,8 +886,8 @@ static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode)
|
|||
* just won't find anything to restart if we ACK everything.
|
||||
*/
|
||||
spin_lock(&dev->pending_lock);
|
||||
if (qp->timerwait.next != LIST_POISON1)
|
||||
list_del(&qp->timerwait);
|
||||
if (!list_empty(&qp->timerwait))
|
||||
list_del_init(&qp->timerwait);
|
||||
spin_unlock(&dev->pending_lock);
|
||||
|
||||
/*
|
||||
|
@ -1194,8 +1194,7 @@ static inline void ipath_rc_rcv_resp(struct ipath_ibdev *dev,
|
|||
IB_WR_RDMA_READ))
|
||||
goto ack_done;
|
||||
spin_lock(&dev->pending_lock);
|
||||
if (qp->s_rnr_timeout == 0 &&
|
||||
qp->timerwait.next != LIST_POISON1)
|
||||
if (qp->s_rnr_timeout == 0 && !list_empty(&qp->timerwait))
|
||||
list_move_tail(&qp->timerwait,
|
||||
&dev->pending[dev->pending_index]);
|
||||
spin_unlock(&dev->pending_lock);
|
||||
|
|
|
@ -435,7 +435,7 @@ void ipath_no_bufs_available(struct ipath_qp *qp, struct ipath_ibdev *dev)
|
|||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&dev->pending_lock, flags);
|
||||
if (qp->piowait.next == LIST_POISON1)
|
||||
if (list_empty(&qp->piowait))
|
||||
list_add_tail(&qp->piowait, &dev->piowait);
|
||||
spin_unlock_irqrestore(&dev->pending_lock, flags);
|
||||
/*
|
||||
|
|
|
@@ -464,7 +464,7 @@ static void ipath_ib_timer(void *arg)
         last = &dev->pending[dev->pending_index];
         while (!list_empty(last)) {
                 qp = list_entry(last->next, struct ipath_qp, timerwait);
-                list_del(&qp->timerwait);
+                list_del_init(&qp->timerwait);
                 qp->timer_next = resend;
                 resend = qp;
                 atomic_inc(&qp->refcount);

@@ -474,7 +474,7 @@ static void ipath_ib_timer(void *arg)
                 qp = list_entry(last->next, struct ipath_qp, timerwait);
                 if (--qp->s_rnr_timeout == 0) {
                         do {
-                                list_del(&qp->timerwait);
+                                list_del_init(&qp->timerwait);
                                 tasklet_hi_schedule(&qp->s_task);
                                 if (list_empty(last))
                                         break;

@@ -554,7 +554,7 @@ static int ipath_ib_piobufavail(void *arg)
         while (!list_empty(&dev->piowait)) {
                 qp = list_entry(dev->piowait.next, struct ipath_qp,
                                 piowait);
-                list_del(&qp->piowait);
+                list_del_init(&qp->piowait);
                 tasklet_hi_schedule(&qp->s_task);
         }
         spin_unlock_irqrestore(&dev->pending_lock, flags);

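Note: the ipath hunks above all replace open-coded LIST_POISON1 checks with the list_empty()/list_del_init() idiom: list_del_init() re-points the entry at itself, so a later list_empty() on the entry reports whether it is still queued, and a second unlink is harmless. The following stand-alone C sketch re-implements just enough of the list_head helpers in userspace to show the idiom; it is illustrative only and is not the kernel's <linux/list.h>.

#include <stdio.h>

struct list_head {
        struct list_head *next, *prev;
};

static void INIT_LIST_HEAD(struct list_head *h)
{
        h->next = h;
        h->prev = h;
}

static int list_empty(const struct list_head *h)
{
        return h->next == h;
}

static void list_add_tail(struct list_head *entry, struct list_head *head)
{
        entry->prev = head->prev;
        entry->next = head;
        head->prev->next = entry;
        head->prev = entry;
}

/* Unlink and re-initialize: afterwards list_empty(entry) is true and a
 * repeated unlink is a no-op, unlike a plain list_del(), which would leave
 * the entry's pointers poisoned. */
static void list_del_init(struct list_head *entry)
{
        entry->prev->next = entry->next;
        entry->next->prev = entry->prev;
        INIT_LIST_HEAD(entry);
}

int main(void)
{
        struct list_head pending, timerwait;

        INIT_LIST_HEAD(&pending);
        INIT_LIST_HEAD(&timerwait);

        if (list_empty(&timerwait))             /* not on any list yet: queue it */
                list_add_tail(&timerwait, &pending);

        if (!list_empty(&timerwait))            /* queued: safe to unlink */
                list_del_init(&timerwait);

        if (!list_empty(&timerwait))            /* already unlinked: nothing to do */
                list_del_init(&timerwait);

        printf("pending empty: %d\n", list_empty(&pending));
        return 0;
}
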
@@ -951,6 +951,7 @@ static void *ipath_register_ib_device(int unit, struct ipath_devdata *dd)
         idev->dd = dd;
 
         strlcpy(dev->name, "ipath%d", IB_DEVICE_NAME_MAX);
         dev->owner = THIS_MODULE;
+        dev->node_guid = ipath_layer_get_guid(dd);
         dev->uverbs_abi_ver = IPATH_UVERBS_ABI_VERSION;
         dev->uverbs_cmd_mask =

@@ -182,7 +182,7 @@ struct mthca_cmd_context {
         u8 status;
 };
 
-static int fw_cmd_doorbell = 1;
+static int fw_cmd_doorbell = 0;
 module_param(fw_cmd_doorbell, int, 0644);
 MODULE_PARM_DESC(fw_cmd_doorbell, "post FW commands through doorbell page if nonzero "
                  "(and supported by FW)");

@@ -1727,23 +1727,7 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
         ind = qp->rq.next_ind;
 
-        for (nreq = 0; wr; ++nreq, wr = wr->next) {
-                if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) {
-                        nreq = 0;
-
-                        doorbell[0] = cpu_to_be32((qp->rq.next_ind << qp->rq.wqe_shift) | size0);
-                        doorbell[1] = cpu_to_be32(qp->qpn << 8);
-
-                        wmb();
-
-                        mthca_write64(doorbell,
-                                      dev->kar + MTHCA_RECEIVE_DOORBELL,
-                                      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
-
-                        qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB;
-                        size0 = 0;
-                }
-
+        for (nreq = 0; wr; wr = wr->next) {
                 if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
                         mthca_err(dev, "RQ %06x full (%u head, %u tail,"
                                   " %d max, %d nreq)\n", qp->qpn,

@@ -1797,6 +1781,23 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
                 ++ind;
                 if (unlikely(ind >= qp->rq.max))
                         ind -= qp->rq.max;
+
+                ++nreq;
+                if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) {
+                        nreq = 0;
+
+                        doorbell[0] = cpu_to_be32((qp->rq.next_ind << qp->rq.wqe_shift) | size0);
+                        doorbell[1] = cpu_to_be32(qp->qpn << 8);
+
+                        wmb();
+
+                        mthca_write64(doorbell,
+                                      dev->kar + MTHCA_RECEIVE_DOORBELL,
+                                      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
+
+                        qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB;
+                        size0 = 0;
+                }
         }
 
 out:

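Note: the two mthca_tavor_post_receive() hunks above move the doorbell batching so the request is counted first (++nreq) and the doorbell is rung once the per-doorbell limit is reached. A generic, hedged illustration of that counting pattern follows; BATCH, flush() and the request loop are invented for the example and are not mthca API.

#include <stdio.h>

#define BATCH 4         /* stands in for a per-doorbell request limit */

static void flush(int pending)
{
        printf("notifying hardware of %d queued requests\n", pending);
}

int main(void)
{
        int nreq = 0;

        for (int i = 0; i < 10; i++) {
                /* ... build and queue one request here ... */
                nreq++;                 /* count it first, as the fixed loop does */
                if (nreq == BATCH) {
                        flush(nreq);
                        nreq = 0;
                }
        }
        if (nreq)                       /* any remainder still needs a flush */
                flush(nreq);
        return 0;
}
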
@@ -340,7 +340,10 @@ static void srp_disconnect_target(struct srp_target_port *target)
         /* XXX should send SRP_I_LOGOUT request */
 
         init_completion(&target->done);
-        ib_send_cm_dreq(target->cm_id, NULL, 0);
+        if (ib_send_cm_dreq(target->cm_id, NULL, 0)) {
+                printk(KERN_DEBUG PFX "Sending CM DREQ failed\n");
+                return;
+        }
         wait_for_completion(&target->done);
 }
 

@@ -351,7 +354,6 @@ static void srp_remove_work(void *target_ptr)
         spin_lock_irq(target->scsi_host->host_lock);
         if (target->state != SRP_TARGET_DEAD) {
                 spin_unlock_irq(target->scsi_host->host_lock);
-                scsi_host_put(target->scsi_host);
                 return;
         }
         target->state = SRP_TARGET_REMOVED;

@@ -365,8 +367,6 @@ static void srp_remove_work(void *target_ptr)
         ib_destroy_cm_id(target->cm_id);
         srp_free_target_ib(target);
         scsi_host_put(target->scsi_host);
-        /* And another put to really free the target port... */
-        scsi_host_put(target->scsi_host);
 }
 
 static int srp_connect_target(struct srp_target_port *target)

@@ -1241,7 +1241,7 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
         list_for_each_entry_safe(req, tmp, &target->req_queue, list)
                 if (req->scmnd->device == scmnd->device) {
                         req->scmnd->result = DID_RESET << 16;
-                        scmnd->scsi_done(scmnd);
+                        req->scmnd->scsi_done(req->scmnd);
                         srp_remove_req(target, req);
                 }
 

@@ -1499,7 +1499,6 @@ static int __init capi_init(void)
                 printk(KERN_ERR "capi20: unable to get major %d\n", capi_major);
                 return major_ret;
         }
-        capi_major = major_ret;
         capi_class = class_create(THIS_MODULE, "capi");
         if (IS_ERR(capi_class)) {
                 unregister_chrdev(capi_major, "capi20");

@@ -710,8 +710,8 @@ static int gigaset_probe(struct usb_interface *interface,
         retval = -ENODEV; //FIXME
 
         /* See if the device offered us matches what we can accept */
-        if ((le16_to_cpu(udev->descriptor.idVendor != USB_M105_VENDOR_ID)) ||
-            (le16_to_cpu(udev->descriptor.idProduct != USB_M105_PRODUCT_ID)))
+        if ((le16_to_cpu(udev->descriptor.idVendor) != USB_M105_VENDOR_ID) ||
+            (le16_to_cpu(udev->descriptor.idProduct) != USB_M105_PRODUCT_ID))
                 return -ENODEV;
 
         /* this starts to become ascii art... */

@@ -2880,7 +2880,7 @@ isdn_tty_cmd_ATand(char **p, modem_info * info)
                         p[0]++;
                         i = 0;
                         while (*p[0] && (strchr("0123456789,-*[]?;", *p[0])) &&
-                               (i < ISDN_LMSNLEN))
+                               (i < ISDN_LMSNLEN - 1))
                                 m->lmsn[i++] = *p[0]++;
                         m->lmsn[i] = '\0';
                         break;

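Note: the isdn_tty hunk above tightens the copy bound from ISDN_LMSNLEN to ISDN_LMSNLEN - 1 so the terminating '\0' written after the loop always fits inside m->lmsn. A minimal stand-alone sketch of the same bound follows; the buffer size and input string are invented for the example.

#include <stdio.h>
#include <string.h>

#define LMSNLEN 8

int main(void)
{
        const char *src = "0123456789";        /* longer than the buffer */
        char lmsn[LMSNLEN];
        int i = 0;

        while (src[i] != '\0' && i < LMSNLEN - 1) {
                lmsn[i] = src[i];
                i++;
        }
        lmsn[i] = '\0';                 /* always fits because of the -1 bound */

        printf("copied \"%s\" (%zu chars)\n", lmsn, strlen(lmsn));
        return 0;
}
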
@@ -4,8 +4,11 @@ menu "LED devices"
 config NEW_LEDS
         bool "LED Support"
         help
-          Say Y to enable Linux LED support. This is not related to standard
-          keyboard LEDs which are controlled via the input system.
+          Say Y to enable Linux LED support. This allows control of supported
+          LEDs from both userspace and optionally, by kernel events (triggers).
+
+          This is not related to standard keyboard LEDs which are controlled
+          via the input system.
 
 config LEDS_CLASS
         tristate "LED Class Support"