Merge branch 'upstream-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394-2.6

* 'upstream-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394-2.6: (48 commits)
  ieee1394: raw1394: arm functions slept in atomic context
  ieee1394: sbp2: enable auto spin-up for all SBP-2 devices
  MAINTAINERS: updates to IEEE 1394 subsystem maintainership
  ieee1394: ohci1394: check for errors in suspend or resume
  set power state of firewire host during suspend
  ieee1394: ohci1394: more obvious endianess handling
  ieee1394: ohci1394: fix endianess bug in debug message
  ieee1394: sbp2: don't prefer MODE SENSE 10
  ieee1394: nodemgr: grab class.subsys.rwsem in nodemgr_resume_ne
  ieee1394: nodemgr: fix rwsem recursion
  ieee1394: sbp2: more help in Kconfig
  ieee1394: sbp2: prevent rare deadlock in shutdown
  ieee1394: sbp2: update includes
  ieee1394: sbp2: better handling of transport errors
  ieee1394: sbp2: recheck node generation in sbp2_update
  ieee1394: sbp2: safer agent reset in error handlers
  ieee1394: sbp2: handle "sbp2util_node_write_no_wait failed"
  CONFIG_PM=n slim: drivers/ieee1394/ohci1394.c
  ieee1394: safer definition of empty macros
  video1394: add poll file operation support
  ...
Linus Torvalds, 2006-09-30 09:38:19 -07:00
commit 0cd43f83d3
31 changed files with 1217 additions and 1176 deletions


@@ -46,15 +46,6 @@ Who: Jody McIntyre <scjody@modernduck.com>
 ---------------------------
-What: sbp2: module parameter "force_inquiry_hack"
-When: July 2006
-Why: Superceded by parameter "workarounds". Both parameters are meant to be
-	used ad-hoc and for single devices only, i.e. not in modprobe.conf,
-	therefore the impact of this feature replacement should be low.
-Who: Stefan Richter <stefanr@s5r6.in-berlin.de>
----------------------------
 What: Video4Linux API 1 ioctls and video_decoder.h from Video devices.
 When: July 2006
 Why: V4L1 AP1 was replaced by V4L2 API. during migration from 2.4 to 2.6


@@ -1398,36 +1398,29 @@ M: Gadi Oxman <gadio@netvision.net.il>
 L: linux-kernel@vger.kernel.org
 S: Maintained
-IEEE 1394 ETHERNET (eth1394)
-L: linux1394-devel@lists.sourceforge.net
-W: http://www.linux1394.org/
-S: Orphan
 IEEE 1394 SUBSYSTEM
 P: Ben Collins
 M: bcollins@debian.org
-P: Jody McIntyre
-M: scjody@modernduck.com
+P: Stefan Richter
+M: stefanr@s5r6.in-berlin.de
 L: linux1394-devel@lists.sourceforge.net
 W: http://www.linux1394.org/
-T: git kernel.org:/pub/scm/linux/kernel/git/scjody/ieee1394.git
+T: git kernel.org:/pub/scm/linux/kernel/git/ieee1394/linux1394-2.6.git
 S: Maintained
-IEEE 1394 OHCI DRIVER
-P: Ben Collins
-M: bcollins@debian.org
-P: Jody McIntyre
-M: scjody@modernduck.com
+IEEE 1394 IPV4 DRIVER (eth1394)
+P: Stefan Richter
+M: stefanr@s5r6.in-berlin.de
 L: linux1394-devel@lists.sourceforge.net
-W: http://www.linux1394.org/
-S: Maintained
+S: Odd Fixes
 IEEE 1394 PCILYNX DRIVER
 P: Jody McIntyre
 M: scjody@modernduck.com
+P: Stefan Richter
+M: stefanr@s5r6.in-berlin.de
 L: linux1394-devel@lists.sourceforge.net
-W: http://www.linux1394.org/
-S: Maintained
+S: Odd Fixes
 IEEE 1394 RAW I/O DRIVER
 P: Ben Collins
@@ -1435,16 +1428,6 @@ M: bcollins@debian.org
 P: Dan Dennedy
 M: dan@dennedy.org
 L: linux1394-devel@lists.sourceforge.net
-W: http://www.linux1394.org/
-S: Maintained
-IEEE 1394 SBP2
-P: Ben Collins
-M: bcollins@debian.org
-P: Stefan Richter
-M: stefanr@s5r6.in-berlin.de
-L: linux1394-devel@lists.sourceforge.net
-W: http://www.linux1394.org/
 S: Maintained
 IMS TWINTURBO FRAMEBUFFER DRIVER


@@ -120,12 +120,19 @@ config IEEE1394_VIDEO1394
	  this option only if you have an IEEE 1394 video device connected to
	  an OHCI-1394 card.
+comment "SBP-2 support (for storage devices) requires SCSI"
+	depends on IEEE1394 && SCSI=n
 config IEEE1394_SBP2
	tristate "SBP-2 support (Harddisks etc.)"
	depends on IEEE1394 && SCSI && (PCI || BROKEN)
	help
-	  This option enables you to use SBP-2 devices connected to your IEEE
-	  1394 bus. SBP-2 devices include harddrives and DVD devices.
+	  This option enables you to use SBP-2 devices connected to an IEEE
+	  1394 bus. SBP-2 devices include storage devices like harddisks and
+	  DVD drives, also some other FireWire devices like scanners.
+	  You should also enable support for disks, CD-ROMs, etc. in the SCSI
+	  configuration section.
 config IEEE1394_SBP2_PHYS_DMA
	bool "Enable replacement for physical DMA in SBP2"


@@ -17,11 +17,13 @@
  *
  */
-#include <linux/string.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/param.h>
 #include <linux/spinlock.h>
+#include <linux/string.h>
 #include "csr1212.h"
 #include "ieee1394_types.h"
@@ -149,31 +151,18 @@ static void host_reset(struct hpsb_host *host)
 /*
  * HI == seconds (bits 0:2)
- * LO == fraction units of 1/8000 of a second, as per 1394 (bits 19:31)
+ * LO == fractions of a second in units of 125usec (bits 19:31)
  *
- * Convert to units and then to HZ, for comparison to jiffies.
- *
- * By default this will end up being 800 units, or 100ms (125usec per
- * unit).
- *
- * NOTE: The spec says 1/8000, but also says we can compute based on 1/8192
- * like CSR specifies. Should make our math less complex.
+ * Convert SPLIT_TIMEOUT to jiffies.
+ * The default and minimum as per 1394a-2000 clause 8.3.2.2.6 is 100ms.
  */
 static inline void calculate_expire(struct csr_control *csr)
 {
-	unsigned long units;
-	/* Take the seconds, and convert to units */
-	units = (unsigned long)(csr->split_timeout_hi & 0x07) << 13;
-	/* Add in the fractional units */
-	units += (unsigned long)(csr->split_timeout_lo >> 19);
-	/* Convert to jiffies */
-	csr->expire = (unsigned long)(units * HZ) >> 13UL;
-	/* Just to keep from rounding low */
-	csr->expire++;
+	unsigned long usecs =
+		(csr->split_timeout_hi & 0x07) * USEC_PER_SEC +
+		(csr->split_timeout_lo >> 19) * 125L;
+	csr->expire = usecs_to_jiffies(usecs > 100000L ? usecs : 100000L);
	HPSB_VERBOSE("CSR: setting expire to %lu, HZ=%u", csr->expire, HZ);
 }
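An aside on the rewritten calculate_expire() above: the arithmetic is easy to check in user space. The sketch below is illustrative only, not kernel code -- it assumes the common CSR default of SPLIT_TIMEOUT_HI = 0 and SPLIT_TIMEOUT_LO carrying 800 fraction units (800 << 19), and uses a crude stand-in for the kernel's usecs_to_jiffies() with an arbitrary HZ of 250.

#include <stdio.h>

#define USEC_PER_SEC 1000000L
#define HZ 250	/* illustrative; the kernel supplies the real HZ */

/* crude stand-in for the kernel's usecs_to_jiffies(), rounding up */
static unsigned long usecs_to_jiffies(unsigned long usecs)
{
	return (usecs * HZ + USEC_PER_SEC - 1) / USEC_PER_SEC;
}

int main(void)
{
	unsigned long split_timeout_hi = 0;           /* seconds in bits 0:2 */
	unsigned long split_timeout_lo = 800UL << 19; /* 800 units of 125 usec */
	unsigned long usecs, expire;

	usecs = (split_timeout_hi & 0x07) * USEC_PER_SEC +
		(split_timeout_lo >> 19) * 125L;
	/* clamp to the 100 ms minimum, as calculate_expire() now does */
	expire = usecs_to_jiffies(usecs > 100000L ? usecs : 100000L);

	printf("split timeout = %lu usec -> %lu jiffies at HZ=%d\n",
	       usecs, expire, HZ);	/* prints 100000 usec -> 25 jiffies */
	return 0;
}

At those defaults this reproduces the 100 ms / 800-unit figure that the old comment spelled out, now expressed directly in microseconds.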


@@ -1,75 +1,73 @@
 #ifndef _IEEE1394_CSR_H
 #define _IEEE1394_CSR_H
-#ifdef CONFIG_PREEMPT
-#include <linux/sched.h>
-#endif
+#include <linux/spinlock_types.h>
 #include "csr1212.h"
 #include "ieee1394_types.h"
 #define CSR_REGISTER_BASE 0xfffff0000000ULL
 /* register offsets relative to CSR_REGISTER_BASE */
 #define CSR_STATE_CLEAR 0x0
 #define CSR_STATE_SET 0x4
 #define CSR_NODE_IDS 0x8
 #define CSR_RESET_START 0xc
 #define CSR_SPLIT_TIMEOUT_HI 0x18
 #define CSR_SPLIT_TIMEOUT_LO 0x1c
 #define CSR_CYCLE_TIME 0x200
 #define CSR_BUS_TIME 0x204
 #define CSR_BUSY_TIMEOUT 0x210
 #define CSR_BUS_MANAGER_ID 0x21c
 #define CSR_BANDWIDTH_AVAILABLE 0x220
 #define CSR_CHANNELS_AVAILABLE 0x224
 #define CSR_CHANNELS_AVAILABLE_HI 0x224
 #define CSR_CHANNELS_AVAILABLE_LO 0x228
 #define CSR_BROADCAST_CHANNEL 0x234
 #define CSR_CONFIG_ROM 0x400
 #define CSR_CONFIG_ROM_END 0x800
 #define CSR_FCP_COMMAND 0xB00
 #define CSR_FCP_RESPONSE 0xD00
 #define CSR_FCP_END 0xF00
 #define CSR_TOPOLOGY_MAP 0x1000
 #define CSR_TOPOLOGY_MAP_END 0x1400
 #define CSR_SPEED_MAP 0x2000
 #define CSR_SPEED_MAP_END 0x3000
 /* IEEE 1394 bus specific Configuration ROM Key IDs */
 #define IEEE1394_KV_ID_POWER_REQUIREMENTS (0x30)
-/* IEEE 1394 Bus Inforamation Block specifics */
+/* IEEE 1394 Bus Information Block specifics */
 #define CSR_BUS_INFO_SIZE (5 * sizeof(quadlet_t))
 #define CSR_IRMC_SHIFT 31
 #define CSR_CMC_SHIFT 30
 #define CSR_ISC_SHIFT 29
 #define CSR_BMC_SHIFT 28
 #define CSR_PMC_SHIFT 27
 #define CSR_CYC_CLK_ACC_SHIFT 16
 #define CSR_MAX_REC_SHIFT 12
 #define CSR_MAX_ROM_SHIFT 8
 #define CSR_GENERATION_SHIFT 4
 #define CSR_SET_BUS_INFO_GENERATION(csr, gen) \
	((csr)->bus_info_data[2] = \
		cpu_to_be32((be32_to_cpu((csr)->bus_info_data[2]) & \
			     ~(0xf << CSR_GENERATION_SHIFT)) | \
			    (gen) << CSR_GENERATION_SHIFT))
 struct csr_control {
	spinlock_t lock;
	quadlet_t state;
	quadlet_t node_ids;
	quadlet_t split_timeout_hi, split_timeout_lo;
-	unsigned long expire; // Calculated from split_timeout
+	unsigned long expire; /* Calculated from split_timeout */
	quadlet_t cycle_time;
	quadlet_t bus_time;
	quadlet_t bus_manager_id;
	quadlet_t bandwidth_available;
	quadlet_t channels_available_hi, channels_available_lo;
	quadlet_t broadcast_channel;
	/* Bus Info */
@@ -84,8 +82,8 @@ struct csr_control {
	struct csr1212_csr *rom;
	quadlet_t topology_map[256];
	quadlet_t speed_map[1024];
 };
 extern struct csr1212_bus_ops csr_bus_ops;
@@ -93,4 +91,9 @@ extern struct csr1212_bus_ops csr_bus_ops;
 int init_csr(void);
 void cleanup_csr(void);
+/* hpsb_update_config_rom() is deprecated */
+struct hpsb_host;
+int hpsb_update_config_rom(struct hpsb_host *host, const quadlet_t *new_rom,
+			   size_t size, unsigned char rom_version);
 #endif /* _IEEE1394_CSR_H */


@@ -7,10 +7,13 @@
  * directory of the kernel sources for details.
  */
-#include <linux/module.h>
-#include <linux/vmalloc.h>
-#include <linux/slab.h>
 #include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <asm/scatterlist.h>
 #include "dma.h"
 /* dma_prog_region */


@@ -10,69 +10,91 @@
 #ifndef IEEE1394_DMA_H
 #define IEEE1394_DMA_H
-#include <linux/pci.h>
-#include <asm/scatterlist.h>
-/* struct dma_prog_region
-   a small, physically-contiguous DMA buffer with random-access,
-   synchronous usage characteristics
-*/
+#include <asm/types.h>
+struct pci_dev;
+struct scatterlist;
+struct vm_area_struct;
+/**
+ * struct dma_prog_region - small contiguous DMA buffer
+ * @kvirt: kernel virtual address
+ * @dev: PCI device
+ * @n_pages: number of kernel pages
+ * @bus_addr: base bus address
+ *
+ * a small, physically contiguous DMA buffer with random-access, synchronous
+ * usage characteristics
+ */
 struct dma_prog_region {
-	unsigned char *kvirt; /* kernel virtual address */
-	struct pci_dev *dev; /* PCI device */
-	unsigned int n_pages; /* # of kernel pages */
-	dma_addr_t bus_addr; /* base bus address */
+	unsigned char *kvirt;
+	struct pci_dev *dev;
+	unsigned int n_pages;
+	dma_addr_t bus_addr;
 };
 /* clear out all fields but do not allocate any memory */
 void dma_prog_region_init(struct dma_prog_region *prog);
-int dma_prog_region_alloc(struct dma_prog_region *prog, unsigned long n_bytes, struct pci_dev *dev);
+int dma_prog_region_alloc(struct dma_prog_region *prog, unsigned long n_bytes,
+			  struct pci_dev *dev);
 void dma_prog_region_free(struct dma_prog_region *prog);
-static inline dma_addr_t dma_prog_region_offset_to_bus(struct dma_prog_region *prog, unsigned long offset)
+static inline dma_addr_t dma_prog_region_offset_to_bus(
+		struct dma_prog_region *prog, unsigned long offset)
 {
	return prog->bus_addr + offset;
 }
-/* struct dma_region
-   a large, non-physically-contiguous DMA buffer with streaming,
-   asynchronous usage characteristics
-*/
+/**
+ * struct dma_region - large non-contiguous DMA buffer
+ * @virt: kernel virtual address
+ * @dev: PCI device
+ * @n_pages: number of kernel pages
+ * @n_dma_pages: number of IOMMU pages
+ * @sglist: IOMMU mapping
+ * @direction: PCI_DMA_TODEVICE, etc.
+ *
+ * a large, non-physically-contiguous DMA buffer with streaming, asynchronous
+ * usage characteristics
+ */
 struct dma_region {
-	unsigned char *kvirt; /* kernel virtual address */
-	struct pci_dev *dev; /* PCI device */
-	unsigned int n_pages; /* # of kernel pages */
-	unsigned int n_dma_pages; /* # of IOMMU pages */
-	struct scatterlist *sglist; /* IOMMU mapping */
-	int direction; /* PCI_DMA_TODEVICE, etc */
+	unsigned char *kvirt;
+	struct pci_dev *dev;
+	unsigned int n_pages;
+	unsigned int n_dma_pages;
+	struct scatterlist *sglist;
+	int direction;
 };
 /* clear out all fields but do not allocate anything */
 void dma_region_init(struct dma_region *dma);
 /* allocate the buffer and map it to the IOMMU */
-int dma_region_alloc(struct dma_region *dma, unsigned long n_bytes, struct pci_dev *dev, int direction);
+int dma_region_alloc(struct dma_region *dma, unsigned long n_bytes,
+		     struct pci_dev *dev, int direction);
 /* unmap and free the buffer */
 void dma_region_free(struct dma_region *dma);
 /* sync the CPU's view of the buffer */
-void dma_region_sync_for_cpu(struct dma_region *dma, unsigned long offset, unsigned long len);
+void dma_region_sync_for_cpu(struct dma_region *dma, unsigned long offset,
			     unsigned long len);
 /* sync the IO bus' view of the buffer */
-void dma_region_sync_for_device(struct dma_region *dma, unsigned long offset, unsigned long len);
+void dma_region_sync_for_device(struct dma_region *dma, unsigned long offset,
				unsigned long len);
 /* map the buffer into a user space process */
-int dma_region_mmap(struct dma_region *dma, struct file *file, struct vm_area_struct *vma);
+int dma_region_mmap(struct dma_region *dma, struct file *file,
		    struct vm_area_struct *vma);
 /* macro to index into a DMA region (or dma_prog_region) */
-#define dma_region_i(_dma, _type, _index) ( ((_type*) ((_dma)->kvirt)) + (_index) )
+#define dma_region_i(_dma, _type, _index) \
+	( ((_type*) ((_dma)->kvirt)) + (_index) )
 /* return the DMA bus address of the byte with the given offset
-   relative to the beginning of the dma_region */
+ * relative to the beginning of the dma_region */
-dma_addr_t dma_region_offset_to_bus(struct dma_region *dma, unsigned long offset);
+dma_addr_t dma_region_offset_to_bus(struct dma_region *dma,
				    unsigned long offset);
 #endif /* IEEE1394_DMA_H */


@@ -460,7 +460,7 @@ struct video_card {
	int dma_running;
	/*
-	   3) the sleeping semaphore 'sem' - this is used from process context only,
+	   3) the sleeping mutex 'mtx' - this is used from process context only,
	   to serialize various operations on the video_card. Even though only one
	   open() is allowed, we still need to prevent multiple threads of execution
	   from entering calls like read, write, ioctl, etc.
@@ -468,9 +468,9 @@ struct video_card {
	   I honestly can't think of a good reason to use dv1394 from several threads
	   at once, but we need to serialize anyway to prevent oopses =).
-	   NOTE: if you need both spinlock and sem, take sem first to avoid deadlock!
+	   NOTE: if you need both spinlock and mtx, take mtx first to avoid deadlock!
	 */
-	struct semaphore sem;
+	struct mutex mtx;
	/* people waiting for buffer space, please form a line here... */
	wait_queue_head_t waitq;


@@ -95,6 +95,7 @@
 #include <linux/fs.h>
 #include <linux/poll.h>
 #include <linux/smp_lock.h>
+#include <linux/mutex.h>
 #include <linux/bitops.h>
 #include <asm/byteorder.h>
 #include <asm/atomic.h>
@@ -110,15 +111,15 @@
 #include <linux/compat.h>
 #include <linux/cdev.h>
-#include "ieee1394.h"
-#include "ieee1394_types.h"
-#include "nodemgr.h"
-#include "hosts.h"
-#include "ieee1394_core.h"
-#include "highlevel.h"
 #include "dv1394.h"
 #include "dv1394-private.h"
+#include "highlevel.h"
+#include "hosts.h"
+#include "ieee1394.h"
+#include "ieee1394_core.h"
+#include "ieee1394_hotplug.h"
+#include "ieee1394_types.h"
+#include "nodemgr.h"
 #include "ohci1394.h"
 /* DEBUG LEVELS:
@@ -136,13 +137,13 @@
 #if DV1394_DEBUG_LEVEL >= 2
 #define irq_printk( args... ) printk( args )
 #else
-#define irq_printk( args... )
+#define irq_printk( args... ) do {} while (0)
 #endif
 #if DV1394_DEBUG_LEVEL >= 1
 #define debug_printk( args... ) printk( args)
 #else
-#define debug_printk( args... )
+#define debug_printk( args... ) do {} while (0)
 #endif
 /* issue a dummy PCI read to force the preceding write
@@ -247,7 +248,7 @@ static void frame_delete(struct frame *f)
    Frame_prepare() must be called OUTSIDE the video->spinlock.
    However, frame_prepare() must still be serialized, so
-   it should be called WITH the video->sem taken.
+   it should be called WITH the video->mtx taken.
  */
 static void frame_prepare(struct video_card *video, unsigned int this_frame)
@@ -1271,7 +1272,7 @@ static int dv1394_mmap(struct file *file, struct vm_area_struct *vma)
	int retval = -EINVAL;
	/* serialize mmap */
-	down(&video->sem);
+	mutex_lock(&video->mtx);
	if ( ! video_card_initialized(video) ) {
		retval = do_dv1394_init_default(video);
@@ -1281,7 +1282,7 @@ static int dv1394_mmap(struct file *file, struct vm_area_struct *vma)
	retval = dma_region_mmap(&video->dv_buf, file, vma);
 out:
-	up(&video->sem);
+	mutex_unlock(&video->mtx);
	return retval;
 }
@@ -1337,17 +1338,17 @@ static ssize_t dv1394_write(struct file *file, const char __user *buffer, size_t
	/* serialize this to prevent multi-threaded mayhem */
	if (file->f_flags & O_NONBLOCK) {
-		if (down_trylock(&video->sem))
+		if (!mutex_trylock(&video->mtx))
			return -EAGAIN;
	} else {
-		if (down_interruptible(&video->sem))
+		if (mutex_lock_interruptible(&video->mtx))
			return -ERESTARTSYS;
	}
	if ( !video_card_initialized(video) ) {
		ret = do_dv1394_init_default(video);
		if (ret) {
-			up(&video->sem);
+			mutex_unlock(&video->mtx);
			return ret;
		}
	}
@@ -1418,7 +1419,7 @@ static ssize_t dv1394_write(struct file *file, const char __user *buffer, size_t
	remove_wait_queue(&video->waitq, &wait);
	set_current_state(TASK_RUNNING);
-	up(&video->sem);
+	mutex_unlock(&video->mtx);
	return ret;
 }
@@ -1434,17 +1435,17 @@ static ssize_t dv1394_read(struct file *file, char __user *buffer, size_t count
	/* serialize this to prevent multi-threaded mayhem */
	if (file->f_flags & O_NONBLOCK) {
-		if (down_trylock(&video->sem))
+		if (!mutex_trylock(&video->mtx))
			return -EAGAIN;
	} else {
-		if (down_interruptible(&video->sem))
+		if (mutex_lock_interruptible(&video->mtx))
			return -ERESTARTSYS;
	}
	if ( !video_card_initialized(video) ) {
		ret = do_dv1394_init_default(video);
		if (ret) {
-			up(&video->sem);
+			mutex_unlock(&video->mtx);
			return ret;
		}
		video->continuity_counter = -1;
@@ -1526,7 +1527,7 @@ static ssize_t dv1394_read(struct file *file, char __user *buffer, size_t count
	remove_wait_queue(&video->waitq, &wait);
	set_current_state(TASK_RUNNING);
-	up(&video->sem);
+	mutex_unlock(&video->mtx);
	return ret;
 }
@@ -1547,12 +1548,12 @@ static long dv1394_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
	/* serialize this to prevent multi-threaded mayhem */
	if (file->f_flags & O_NONBLOCK) {
-		if (down_trylock(&video->sem)) {
+		if (!mutex_trylock(&video->mtx)) {
			unlock_kernel();
			return -EAGAIN;
		}
	} else {
-		if (down_interruptible(&video->sem)) {
+		if (mutex_lock_interruptible(&video->mtx)) {
			unlock_kernel();
			return -ERESTARTSYS;
		}
@@ -1778,7 +1779,7 @@ static long dv1394_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
	}
 out:
-	up(&video->sem);
+	mutex_unlock(&video->mtx);
	unlock_kernel();
	return ret;
 }
@@ -2253,7 +2254,7 @@ static int dv1394_init(struct ti_ohci *ohci, enum pal_or_ntsc format, enum modes
	clear_bit(0, &video->open);
	spin_lock_init(&video->spinlock);
	video->dma_running = 0;
-	init_MUTEX(&video->sem);
+	mutex_init(&video->mtx);
	init_waitqueue_head(&video->waitq);
	video->fasync = NULL;
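One detail of the semaphore-to-mutex conversion above is easy to miss: down_trylock() returns nonzero when it fails to take the semaphore, while mutex_trylock() returns 1 on success, which is why the O_NONBLOCK test flips from "if (down_trylock(...))" to "if (!mutex_trylock(...))". Below is a rough user-space analogue of that non-blocking serialization pattern, sketched with pthreads rather than the kernel API; the function name is made up for illustration.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;

/* mirrors the O_NONBLOCK branch in dv1394_read()/dv1394_write():
 * fail fast with -EAGAIN instead of sleeping on the lock */
static int serialized_op(int nonblock)
{
	if (nonblock) {
		if (pthread_mutex_trylock(&mtx) != 0)
			return -EAGAIN;	/* somebody else holds the lock */
	} else {
		pthread_mutex_lock(&mtx);
	}

	/* ... serialized work on the shared device state would go here ... */

	pthread_mutex_unlock(&mtx);
	return 0;
}

int main(void)
{
	printf("non-blocking attempt returned %d\n", serialized_op(1));
	return 0;
}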


@@ -64,19 +64,19 @@
 #include <linux/ethtool.h>
 #include <asm/uaccess.h>
 #include <asm/delay.h>
-#include <asm/semaphore.h>
 #include <net/arp.h>
+#include "config_roms.h"
 #include "csr1212.h"
-#include "ieee1394_types.h"
-#include "ieee1394_core.h"
-#include "ieee1394_transactions.h"
-#include "ieee1394.h"
+#include "eth1394.h"
 #include "highlevel.h"
+#include "ieee1394.h"
+#include "ieee1394_core.h"
+#include "ieee1394_hotplug.h"
+#include "ieee1394_transactions.h"
+#include "ieee1394_types.h"
 #include "iso.h"
 #include "nodemgr.h"
-#include "eth1394.h"
-#include "config_roms.h"
 #define ETH1394_PRINT_G(level, fmt, args...) \
	printk(level "%s: " fmt, driver_name, ## args)


@@ -1,60 +1,61 @@
 #ifndef IEEE1394_HIGHLEVEL_H
 #define IEEE1394_HIGHLEVEL_H
+#include <linux/list.h>
+#include <linux/spinlock_types.h>
+#include <linux/types.h>
+struct module;
+#include "ieee1394_types.h"
+struct hpsb_host;
+/* internal to ieee1394 core */
 struct hpsb_address_serve {
	struct list_head host_list; /* per host list */
	struct list_head hl_list; /* hpsb_highlevel list */
	struct hpsb_address_ops *op;
	struct hpsb_host *host;
-	/* first address handled and first address behind, quadlet aligned */
-	u64 start, end;
+	u64 start; /* first address handled, quadlet aligned */
+	u64 end; /* first address behind, quadlet aligned */
 };
-/*
- * The above structs are internal to highlevel driver handling. Only the
- * following structures are of interest to actual highlevel drivers.
- */
+/* Only the following structures are of interest to actual highlevel drivers. */
 struct hpsb_highlevel {
	struct module *owner;
	const char *name;
	/* Any of the following pointers can legally be NULL, except for
	 * iso_receive which can only be NULL when you don't request
	 * channels. */
	/* New host initialized. Will also be called during
	 * hpsb_register_highlevel for all hosts already installed. */
-	void (*add_host) (struct hpsb_host *host);
+	void (*add_host)(struct hpsb_host *host);
	/* Host about to be removed. Will also be called during
	 * hpsb_unregister_highlevel once for each host. */
-	void (*remove_host) (struct hpsb_host *host);
+	void (*remove_host)(struct hpsb_host *host);
	/* Host experienced bus reset with possible configuration changes.
	 * Note that this one may occur during interrupt/bottom half handling.
	 * You can not expect to be able to do stock hpsb_reads. */
-	void (*host_reset) (struct hpsb_host *host);
+	void (*host_reset)(struct hpsb_host *host);
	/* An isochronous packet was received. Channel contains the channel
	 * number for your convenience, it is also contained in the included
	 * packet header (first quadlet, CRCs are missing). You may get called
	 * for channel/host combinations you did not request. */
-	void (*iso_receive) (struct hpsb_host *host, int channel,
+	void (*iso_receive)(struct hpsb_host *host, int channel,
			    quadlet_t *data, size_t length);
	/* A write request was received on either the FCP_COMMAND (direction =
	 * 0) or the FCP_RESPONSE (direction = 1) register. The cts arg
	 * contains the cts field (first byte of data). */
-	void (*fcp_request) (struct hpsb_host *host, int nodeid, int direction,
+	void (*fcp_request)(struct hpsb_host *host, int nodeid, int direction,
			    int cts, u8 *data, size_t length);
	/* These are initialized by the subsystem when the
	 * hpsb_higlevel is registered. */
@@ -67,61 +68,62 @@ struct hpsb_highlevel {
 };
 struct hpsb_address_ops {
	/*
	 * Null function pointers will make the respective operation complete
	 * with RCODE_TYPE_ERROR. Makes for easy to implement read-only
	 * registers (just leave everything but read NULL).
	 *
	 * All functions shall return appropriate IEEE 1394 rcodes.
	 */
-	/* These functions have to implement block reads for themselves. */
-	/* These functions either return a response code
-	   or a negative number. In the first case a response will be generated; in the
-	   later case, no response will be sent and the driver, that handled the request
-	   will send the response itself
-	*/
+	/* These functions have to implement block reads for themselves.
+	 *
+	 * These functions either return a response code or a negative number.
+	 * In the first case a response will be generated. In the latter case,
+	 * no response will be sent and the driver which handled the request
+	 * will send the response itself. */
-	int (*read) (struct hpsb_host *host, int nodeid, quadlet_t *buffer,
+	int (*read)(struct hpsb_host *host, int nodeid, quadlet_t *buffer,
		    u64 addr, size_t length, u16 flags);
-	int (*write) (struct hpsb_host *host, int nodeid, int destid,
+	int (*write)(struct hpsb_host *host, int nodeid, int destid,
		     quadlet_t *data, u64 addr, size_t length, u16 flags);
	/* Lock transactions: write results of ext_tcode operation into
	 * *store. */
-	int (*lock) (struct hpsb_host *host, int nodeid, quadlet_t *store,
-		     u64 addr, quadlet_t data, quadlet_t arg, int ext_tcode, u16 flags);
-	int (*lock64) (struct hpsb_host *host, int nodeid, octlet_t *store,
-		       u64 addr, octlet_t data, octlet_t arg, int ext_tcode, u16 flags);
+	int (*lock)(struct hpsb_host *host, int nodeid, quadlet_t *store,
+		    u64 addr, quadlet_t data, quadlet_t arg, int ext_tcode,
+		    u16 flags);
+	int (*lock64)(struct hpsb_host *host, int nodeid, octlet_t *store,
+		      u64 addr, octlet_t data, octlet_t arg, int ext_tcode,
+		      u16 flags);
 };
 void highlevel_add_host(struct hpsb_host *host);
 void highlevel_remove_host(struct hpsb_host *host);
 void highlevel_host_reset(struct hpsb_host *host);
-/* these functions are called to handle transactions. They are called, when
-   a packet arrives. The flags argument contains the second word of the first header
-   quadlet of the incoming packet (containing transaction label, retry code,
-   transaction code and priority). These functions either return a response code
-   or a negative number. In the first case a response will be generated; in the
-   later case, no response will be sent and the driver, that handled the request
-   will send the response itself.
-*/
+/*
+ * These functions are called to handle transactions. They are called when a
+ * packet arrives. The flags argument contains the second word of the first
+ * header quadlet of the incoming packet (containing transaction label, retry
+ * code, transaction code and priority). These functions either return a
+ * response code or a negative number. In the first case a response will be
+ * generated. In the latter case, no response will be sent and the driver which
+ * handled the request will send the response itself.
+ */
-int highlevel_read(struct hpsb_host *host, int nodeid, void *data,
-		   u64 addr, unsigned int length, u16 flags);
-int highlevel_write(struct hpsb_host *host, int nodeid, int destid,
-		    void *data, u64 addr, unsigned int length, u16 flags);
+int highlevel_read(struct hpsb_host *host, int nodeid, void *data, u64 addr,
		   unsigned int length, u16 flags);
+int highlevel_write(struct hpsb_host *host, int nodeid, int destid, void *data,
		    u64 addr, unsigned int length, u16 flags);
 int highlevel_lock(struct hpsb_host *host, int nodeid, quadlet_t *store,
-		   u64 addr, quadlet_t data, quadlet_t arg, int ext_tcode, u16 flags);
+		   u64 addr, quadlet_t data, quadlet_t arg, int ext_tcode,
+		   u16 flags);
 int highlevel_lock64(struct hpsb_host *host, int nodeid, octlet_t *store,
-		     u64 addr, octlet_t data, octlet_t arg, int ext_tcode, u16 flags);
+		     u64 addr, octlet_t data, octlet_t arg, int ext_tcode,
+		     u16 flags);
-void highlevel_iso_receive(struct hpsb_host *host, void *data,
-			   size_t length);
+void highlevel_iso_receive(struct hpsb_host *host, void *data, size_t length);
 void highlevel_fcp_request(struct hpsb_host *host, int nodeid, int direction,
			   void *data, size_t length);
 /*
  * Register highlevel driver. The name pointer has to stay valid at all times
@@ -132,13 +134,15 @@ void hpsb_unregister_highlevel(struct hpsb_highlevel *hl);
 /*
  * Register handlers for host address spaces. Start and end are 48 bit pointers
- * and have to be quadlet aligned (end points to the first address behind the
- * handled addresses. This function can be called multiple times for a single
- * hpsb_highlevel to implement sparse register sets. The requested region must
- * not overlap any previously allocated region, otherwise registering will fail.
+ * and have to be quadlet aligned. Argument "end" points to the first address
+ * behind the handled addresses. This function can be called multiple times for
+ * a single hpsb_highlevel to implement sparse register sets. The requested
+ * region must not overlap any previously allocated region, otherwise
+ * registering will fail.
  *
- * It returns true for successful allocation. There is no unregister function,
- * all address spaces are deallocated together with the hpsb_highlevel.
+ * It returns true for successful allocation. Address spaces can be
+ * unregistered with hpsb_unregister_addrspace. All remaining address spaces
+ * are automatically deallocated together with the hpsb_highlevel.
  */
 u64 hpsb_allocate_and_register_addrspace(struct hpsb_highlevel *hl,
					 struct hpsb_host *host,
@@ -146,20 +150,18 @@ u64 hpsb_allocate_and_register_addrspace(struct hpsb_highlevel *hl,
					 u64 size, u64 alignment,
					 u64 start, u64 end);
 int hpsb_register_addrspace(struct hpsb_highlevel *hl, struct hpsb_host *host,
			    struct hpsb_address_ops *ops, u64 start, u64 end);
 int hpsb_unregister_addrspace(struct hpsb_highlevel *hl, struct hpsb_host *host,
			      u64 start);
 /*
  * Enable or disable receving a certain isochronous channel through the
  * iso_receive op.
  */
 int hpsb_listen_channel(struct hpsb_highlevel *hl, struct hpsb_host *host,
			unsigned int channel);
 void hpsb_unlisten_channel(struct hpsb_highlevel *hl, struct hpsb_host *host,
			   unsigned int channel);
 /* Retrieve a hostinfo pointer bound to this driver/host */
 void *hpsb_get_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host);
@@ -172,19 +174,24 @@ void *hpsb_create_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host,
 void hpsb_destroy_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host);
 /* Set an alternate lookup key for the hostinfo bound to this driver/host */
-void hpsb_set_hostinfo_key(struct hpsb_highlevel *hl, struct hpsb_host *host, unsigned long key);
+void hpsb_set_hostinfo_key(struct hpsb_highlevel *hl, struct hpsb_host *host,
			   unsigned long key);
-/* Retrieve the alternate lookup key for the hostinfo bound to this driver/host */
-unsigned long hpsb_get_hostinfo_key(struct hpsb_highlevel *hl, struct hpsb_host *host);
+/* Retrieve the alternate lookup key for the hostinfo bound to this
+ * driver/host */
+unsigned long hpsb_get_hostinfo_key(struct hpsb_highlevel *hl,
				    struct hpsb_host *host);
 /* Retrieve a hostinfo pointer bound to this driver using its alternate key */
 void *hpsb_get_hostinfo_bykey(struct hpsb_highlevel *hl, unsigned long key);
 /* Set the hostinfo pointer to something useful. Usually follows a call to
  * hpsb_create_hostinfo, where the size is 0. */
-int hpsb_set_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host, void *data);
+int hpsb_set_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host,
		      void *data);
 /* Retrieve hpsb_host using a highlevel handle and a key */
-struct hpsb_host *hpsb_get_host_bykey(struct hpsb_highlevel *hl, unsigned long key);
+struct hpsb_host *hpsb_get_host_bykey(struct hpsb_highlevel *hl,
				      unsigned long key);
 #endif /* IEEE1394_HIGHLEVEL_H */


@@ -90,6 +90,16 @@ static int alloc_hostnum_cb(struct hpsb_host *host, void *__data)
	return 0;
 }
+/*
+ * The pending_packet_queue is special in that it's processed
+ * from hardirq context too (such as hpsb_bus_reset()). Hence
+ * split the lock class from the usual networking skb-head
+ * lock class by using a separate key for it:
+ */
+static struct lock_class_key pending_packet_queue_key;
+static DEFINE_MUTEX(host_num_alloc);
 /**
  * hpsb_alloc_host - allocate a new host controller.
  * @drv: the driver that will manage the host controller
@@ -105,16 +115,6 @@ static int alloc_hostnum_cb(struct hpsb_host *host, void *__data)
  * Return Value: a pointer to the &hpsb_host if successful, %NULL if
  * no memory was available.
  */
-static DEFINE_MUTEX(host_num_alloc);
-/*
- * The pending_packet_queue is special in that it's processed
- * from hardirq context too (such as hpsb_bus_reset()). Hence
- * split the lock class from the usual networking skb-head
- * lock class by using a separate key for it:
- */
-static struct lock_class_key pending_packet_queue_key;
 struct hpsb_host *hpsb_alloc_host(struct hpsb_host_driver *drv, size_t extra,
				  struct device *dev)
 {
@@ -143,9 +143,6 @@ struct hpsb_host *hpsb_alloc_host(struct hpsb_host_driver *drv, size_t extra,
	for (i = 2; i < 16; i++)
		h->csr.gen_timestamp[i] = jiffies - 60 * HZ;
-	for (i = 0; i < ARRAY_SIZE(h->tpool); i++)
-		HPSB_TPOOL_INIT(&h->tpool[i]);
	atomic_set(&h->generation, 0);
	INIT_WORK(&h->delayed_reset, delayed_reset_bus, h);


@@ -2,17 +2,19 @@
 #define _IEEE1394_HOSTS_H
 #include <linux/device.h>
-#include <linux/wait.h>
 #include <linux/list.h>
-#include <linux/timer.h>
 #include <linux/skbuff.h>
-#include <asm/semaphore.h>
+#include <linux/timer.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+#include <asm/atomic.h>
+struct pci_dev;
+struct module;
 #include "ieee1394_types.h"
 #include "csr.h"
 struct hpsb_packet;
 struct hpsb_iso;
@@ -33,7 +35,6 @@ struct hpsb_host {
	int node_count; /* number of identified nodes on this bus */
	int selfid_count; /* total number of SelfIDs received */
	int nodes_active; /* number of nodes with active link layer */
-	u8 speed[ALL_NODES]; /* speed between each node and local node */
	nodeid_t node_id; /* node ID of this host */
	nodeid_t irm_id; /* ID of this bus' isochronous resource manager */
@@ -53,32 +54,30 @@ struct hpsb_host {
	int reset_retries;
	quadlet_t *topology_map;
	u8 *speed_map;
-	struct csr_control csr;
-	/* Per node tlabel pool allocation */
-	struct hpsb_tlabel_pool tpool[ALL_NODES];
-	struct hpsb_host_driver *driver;
-	struct pci_dev *pdev;
	int id;
+	struct hpsb_host_driver *driver;
+	struct pci_dev *pdev;
	struct device device;
	struct class_device class_dev;
	int update_config_rom;
	struct work_struct delayed_reset;
	unsigned int config_roms;
	struct list_head addr_space;
	u64 low_addr_space; /* upper bound of physical DMA area */
	u64 middle_addr_space; /* upper bound of posted write area */
+	u8 speed[ALL_NODES]; /* speed between each node and local node */
+	/* per node tlabel allocation */
+	u8 next_tl[ALL_NODES];
+	struct { DECLARE_BITMAP(map, 64); } tl_pool[ALL_NODES];
+	struct csr_control csr;
 };
 enum devctl_cmd {
	/* Host is requested to reset its bus and cancel all outstanding async
	 * requests. If arg == 1, it shall also attempt to become root on the
@@ -112,7 +111,7 @@ enum devctl_cmd {
 enum isoctl_cmd {
	/* rawiso API - see iso.h for the meanings of these commands
-	   (they correspond exactly to the hpsb_iso_* API functions)
+	 * (they correspond exactly to the hpsb_iso_* API functions)
	 * INIT = allocate resources
	 * START = begin transmission/reception
	 * STOP = halt transmission/reception
@@ -160,7 +159,8 @@ struct hpsb_host_driver {
	/* The hardware driver may optionally support a function that is used
	 * to set the hardware ConfigROM if the hardware supports handling
	 * reads to the ConfigROM on its own. */
-	void (*set_hw_config_rom) (struct hpsb_host *host, quadlet_t *config_rom);
+	void (*set_hw_config_rom)(struct hpsb_host *host,
+				  quadlet_t *config_rom);
	/* This function shall implement packet transmission based on
	 * packet->type. It shall CRC both parts of the packet (unless
@@ -170,20 +170,21 @@ struct hpsb_host_driver {
	 * called. Return 0 on success, negative errno on failure.
	 * NOTE: The function must be callable in interrupt context.
	 */
-	int (*transmit_packet) (struct hpsb_host *host,
+	int (*transmit_packet)(struct hpsb_host *host,
			       struct hpsb_packet *packet);
	/* This function requests miscellanous services from the driver, see
	 * above for command codes and expected actions. Return -1 for unknown
	 * command, though that should never happen.
	 */
-	int (*devctl) (struct hpsb_host *host, enum devctl_cmd command, int arg);
+	int (*devctl)(struct hpsb_host *host, enum devctl_cmd command, int arg);
	/* ISO transmission/reception functions. Return 0 on success, -1
	 * (or -EXXX errno code) on failure. If the low-level driver does not
	 * support the new ISO API, set isoctl to NULL.
	 */
-	int (*isoctl) (struct hpsb_iso *iso, enum isoctl_cmd command, unsigned long arg);
+	int (*isoctl)(struct hpsb_iso *iso, enum isoctl_cmd command,
+		      unsigned long arg);
	/* This function is mainly to redirect local CSR reads/locks to the iso
	 * management registers (bus manager id, bandwidth available, channels
@@ -196,19 +197,11 @@ struct hpsb_host_driver {
			     quadlet_t data, quadlet_t compare);
 };
 struct hpsb_host *hpsb_alloc_host(struct hpsb_host_driver *drv, size_t extra,
				  struct device *dev);
 int hpsb_add_host(struct hpsb_host *host);
 void hpsb_remove_host(struct hpsb_host *h);
-/* The following 2 functions are deprecated and will be removed when the
- * raw1394/libraw1394 update is complete. */
-int hpsb_update_config_rom(struct hpsb_host *host,
-	const quadlet_t *new_rom, size_t size, unsigned char rom_version);
-int hpsb_get_config_rom(struct hpsb_host *host, quadlet_t *buffer,
-	size_t buffersize, size_t *rom_size, unsigned char *rom_version);
 /* Updates the configuration rom image of a host. rom_version must be the
  * current version, otherwise it will fail with return value -1. If this
  * host does not support config-rom-update, it will return -EINVAL.
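Regarding the struct hpsb_host change above, which replaces the hpsb_tlabel_pool array with a per-node next_tl counter plus a 64-bit DECLARE_BITMAP: IEEE 1394 transaction labels are 6 bits wide, so there are 64 of them per node. The following is a rough user-space sketch of such an allocation scheme under those assumptions -- it is not the kernel code, and the type and function names are made up for illustration.

#include <stdint.h>
#include <stdio.h>

struct tl_pool {
	uint64_t map;          /* bit n set = transaction label n in use */
	unsigned int next_tl;  /* where to start searching next time */
};

static int tl_alloc(struct tl_pool *p)
{
	for (int i = 0; i < 64; i++) {
		unsigned int tl = (p->next_tl + i) & 63;

		if (!(p->map & ((uint64_t)1 << tl))) {
			p->map |= (uint64_t)1 << tl;
			p->next_tl = (tl + 1) & 63;
			return tl;
		}
	}
	return -1;	/* all 64 labels of this node are busy */
}

static void tl_free(struct tl_pool *p, int tl)
{
	p->map &= ~((uint64_t)1 << tl);
}

int main(void)
{
	struct tl_pool pool = { 0, 0 };
	int a = tl_alloc(&pool);
	int b = tl_alloc(&pool);

	printf("allocated tlabels %d and %d\n", a, b);	/* 0 and 1 */
	tl_free(&pool, a);
	return 0;
}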


@@ -1,5 +1,7 @@
-/* Base file for all ieee1394 ioctl's. Linux-1394 has allocated base '#'
- * with a range of 0x00-0x3f. */
+/*
+ * Base file for all ieee1394 ioctl's.
+ * Linux-1394 has allocated base '#' with a range of 0x00-0x3f.
+ */
 #ifndef __IEEE1394_IOCTL_H
 #define __IEEE1394_IOCTL_H
@@ -96,8 +98,7 @@
	_IOW ('#', 0x27, struct raw1394_iso_packets)
 #define RAW1394_IOC_ISO_XMIT_SYNC \
	_IO ('#', 0x28)
 #define RAW1394_IOC_ISO_RECV_FLUSH \
	_IO ('#', 0x29)
 #endif /* __IEEE1394_IOCTL_H */


@ -5,77 +5,78 @@
#ifndef _IEEE1394_IEEE1394_H #ifndef _IEEE1394_IEEE1394_H
#define _IEEE1394_IEEE1394_H #define _IEEE1394_IEEE1394_H
#define TCODE_WRITEQ 0x0 #define TCODE_WRITEQ 0x0
#define TCODE_WRITEB 0x1 #define TCODE_WRITEB 0x1
#define TCODE_WRITE_RESPONSE 0x2 #define TCODE_WRITE_RESPONSE 0x2
#define TCODE_READQ 0x4 #define TCODE_READQ 0x4
#define TCODE_READB 0x5 #define TCODE_READB 0x5
#define TCODE_READQ_RESPONSE 0x6 #define TCODE_READQ_RESPONSE 0x6
#define TCODE_READB_RESPONSE 0x7 #define TCODE_READB_RESPONSE 0x7
#define TCODE_CYCLE_START 0x8 #define TCODE_CYCLE_START 0x8
#define TCODE_LOCK_REQUEST 0x9 #define TCODE_LOCK_REQUEST 0x9
#define TCODE_ISO_DATA 0xa #define TCODE_ISO_DATA 0xa
#define TCODE_STREAM_DATA 0xa #define TCODE_STREAM_DATA 0xa
#define TCODE_LOCK_RESPONSE 0xb #define TCODE_LOCK_RESPONSE 0xb
#define RCODE_COMPLETE 0x0 #define RCODE_COMPLETE 0x0
#define RCODE_CONFLICT_ERROR 0x4 #define RCODE_CONFLICT_ERROR 0x4
#define RCODE_DATA_ERROR 0x5 #define RCODE_DATA_ERROR 0x5
#define RCODE_TYPE_ERROR 0x6 #define RCODE_TYPE_ERROR 0x6
#define RCODE_ADDRESS_ERROR 0x7 #define RCODE_ADDRESS_ERROR 0x7
#define EXTCODE_MASK_SWAP 0x1 #define EXTCODE_MASK_SWAP 0x1
#define EXTCODE_COMPARE_SWAP 0x2 #define EXTCODE_COMPARE_SWAP 0x2
#define EXTCODE_FETCH_ADD 0x3 #define EXTCODE_FETCH_ADD 0x3
#define EXTCODE_LITTLE_ADD 0x4 #define EXTCODE_LITTLE_ADD 0x4
#define EXTCODE_BOUNDED_ADD 0x5 #define EXTCODE_BOUNDED_ADD 0x5
#define EXTCODE_WRAP_ADD 0x6 #define EXTCODE_WRAP_ADD 0x6
#define ACK_COMPLETE 0x1 #define ACK_COMPLETE 0x1
#define ACK_PENDING 0x2 #define ACK_PENDING 0x2
#define ACK_BUSY_X 0x4 #define ACK_BUSY_X 0x4
#define ACK_BUSY_A 0x5 #define ACK_BUSY_A 0x5
#define ACK_BUSY_B 0x6 #define ACK_BUSY_B 0x6
#define ACK_TARDY 0xb #define ACK_TARDY 0xb
#define ACK_CONFLICT_ERROR 0xc #define ACK_CONFLICT_ERROR 0xc
#define ACK_DATA_ERROR 0xd #define ACK_DATA_ERROR 0xd
#define ACK_TYPE_ERROR 0xe #define ACK_TYPE_ERROR 0xe
#define ACK_ADDRESS_ERROR 0xf #define ACK_ADDRESS_ERROR 0xf
/* Non-standard "ACK codes" for internal use */ /* Non-standard "ACK codes" for internal use */
#define ACKX_NONE (-1) #define ACKX_NONE (-1)
#define ACKX_SEND_ERROR (-2) #define ACKX_SEND_ERROR (-2)
#define ACKX_ABORTED (-3) #define ACKX_ABORTED (-3)
#define ACKX_TIMEOUT (-4) #define ACKX_TIMEOUT (-4)
#define IEEE1394_SPEED_100 0x00
#define IEEE1394_SPEED_200 0x01
#define IEEE1394_SPEED_400 0x02
#define IEEE1394_SPEED_800 0x03
#define IEEE1394_SPEED_1600 0x04
#define IEEE1394_SPEED_3200 0x05
#define IEEE1394_SPEED_100 0x00
#define IEEE1394_SPEED_200 0x01
#define IEEE1394_SPEED_400 0x02
#define IEEE1394_SPEED_800 0x03
#define IEEE1394_SPEED_1600 0x04
#define IEEE1394_SPEED_3200 0x05
/* The current highest tested speed supported by the subsystem */ /* The current highest tested speed supported by the subsystem */
#define IEEE1394_SPEED_MAX IEEE1394_SPEED_800 #define IEEE1394_SPEED_MAX IEEE1394_SPEED_800
/* Maps speed values above to a string representation */ /* Maps speed values above to a string representation */
extern const char *hpsb_speedto_str[]; extern const char *hpsb_speedto_str[];
/* 1394a cable PHY packets */ /* 1394a cable PHY packets */
#define SELFID_PWRCL_NO_POWER 0x0 #define SELFID_PWRCL_NO_POWER 0x0
#define SELFID_PWRCL_PROVIDE_15W 0x1 #define SELFID_PWRCL_PROVIDE_15W 0x1
#define SELFID_PWRCL_PROVIDE_30W 0x2 #define SELFID_PWRCL_PROVIDE_30W 0x2
#define SELFID_PWRCL_PROVIDE_45W 0x3 #define SELFID_PWRCL_PROVIDE_45W 0x3
#define SELFID_PWRCL_USE_1W 0x4 #define SELFID_PWRCL_USE_1W 0x4
#define SELFID_PWRCL_USE_3W 0x5 #define SELFID_PWRCL_USE_3W 0x5
#define SELFID_PWRCL_USE_6W 0x6 #define SELFID_PWRCL_USE_6W 0x6
#define SELFID_PWRCL_USE_10W 0x7 #define SELFID_PWRCL_USE_10W 0x7
#define SELFID_PORT_CHILD 0x3 #define SELFID_PORT_CHILD 0x3
#define SELFID_PORT_PARENT 0x2 #define SELFID_PORT_PARENT 0x2
#define SELFID_PORT_NCONN 0x1 #define SELFID_PORT_NCONN 0x1
#define SELFID_PORT_NONE 0x0 #define SELFID_PORT_NONE 0x0
#define SELFID_SPEED_UNKNOWN 0x3 /* 1394b PHY */
#define PHYPACKET_LINKON 0x40000000 #define PHYPACKET_LINKON 0x40000000
#define PHYPACKET_PHYCONFIG_R 0x00800000 #define PHYPACKET_PHYCONFIG_R 0x00800000
@ -91,76 +92,76 @@ extern const char *hpsb_speedto_str[];
#define EXTPHYPACKET_TYPEMASK 0xC0FC0000 #define EXTPHYPACKET_TYPEMASK 0xC0FC0000
#define PHYPACKET_PORT_SHIFT 24 #define PHYPACKET_PORT_SHIFT 24
#define PHYPACKET_GAPCOUNT_SHIFT 16 #define PHYPACKET_GAPCOUNT_SHIFT 16
/* 1394a PHY register map bitmasks */ /* 1394a PHY register map bitmasks */
#define PHY_00_PHYSICAL_ID 0xFC #define PHY_00_PHYSICAL_ID 0xFC
#define PHY_00_R 0x02 /* Root */ #define PHY_00_R 0x02 /* Root */
#define PHY_00_PS 0x01 /* Power Status*/ #define PHY_00_PS 0x01 /* Power Status*/
#define PHY_01_RHB 0x80 /* Root Hold-Off */ #define PHY_01_RHB 0x80 /* Root Hold-Off */
#define PHY_01_IBR 0x80 /* Initiate Bus Reset */ #define PHY_01_IBR 0x80 /* Initiate Bus Reset */
#define PHY_01_GAP_COUNT 0x3F #define PHY_01_GAP_COUNT 0x3F
#define PHY_02_EXTENDED 0xE0 /* 0x7 for 1394a-compliant PHY */ #define PHY_02_EXTENDED 0xE0 /* 0x7 for 1394a-compliant PHY */
#define PHY_02_TOTAL_PORTS 0x1F #define PHY_02_TOTAL_PORTS 0x1F
#define PHY_03_MAX_SPEED 0xE0 #define PHY_03_MAX_SPEED 0xE0
#define PHY_03_DELAY 0x0F #define PHY_03_DELAY 0x0F
#define PHY_04_LCTRL 0x80 /* Link Active Report Control */ #define PHY_04_LCTRL 0x80 /* Link Active Report Control */
#define PHY_04_CONTENDER 0x40 #define PHY_04_CONTENDER 0x40
#define PHY_04_JITTER 0x38 #define PHY_04_JITTER 0x38
#define PHY_04_PWR_CLASS 0x07 /* Power Class */ #define PHY_04_PWR_CLASS 0x07 /* Power Class */
#define PHY_05_WATCHDOG 0x80 #define PHY_05_WATCHDOG 0x80
#define PHY_05_ISBR 0x40 /* Initiate Short Bus Reset */ #define PHY_05_ISBR 0x40 /* Initiate Short Bus Reset */
#define PHY_05_LOOP 0x20 /* Loop Detect */ #define PHY_05_LOOP 0x20 /* Loop Detect */
#define PHY_05_PWR_FAIL 0x10 /* Cable Power Failure Detect */ #define PHY_05_PWR_FAIL 0x10 /* Cable Power Failure Detect */
#define PHY_05_TIMEOUT 0x08 /* Arbitration State Machine Timeout */ #define PHY_05_TIMEOUT 0x08 /* Arbitration State Machine Timeout */
#define PHY_05_PORT_EVENT 0x04 /* Port Event Detect */ #define PHY_05_PORT_EVENT 0x04 /* Port Event Detect */
#define PHY_05_ENAB_ACCEL 0x02 /* Enable Arbitration Acceleration */ #define PHY_05_ENAB_ACCEL 0x02 /* Enable Arbitration Acceleration */
#define PHY_05_ENAB_MULTI 0x01 /* Ena. Multispeed Packet Concatenation */ #define PHY_05_ENAB_MULTI 0x01 /* Ena. Multispeed Packet Concatenation */
#include <asm/byteorder.h> #include <asm/byteorder.h>
#ifdef __BIG_ENDIAN_BITFIELD #ifdef __BIG_ENDIAN_BITFIELD
struct selfid { struct selfid {
u32 packet_identifier:2; /* always binary 10 */ u32 packet_identifier:2; /* always binary 10 */
u32 phy_id:6; u32 phy_id:6;
/* byte */ /* byte */
u32 extended:1; /* if true is struct ext_selfid */ u32 extended:1; /* if true is struct ext_selfid */
u32 link_active:1; u32 link_active:1;
u32 gap_count:6; u32 gap_count:6;
/* byte */ /* byte */
u32 speed:2; u32 speed:2;
u32 phy_delay:2; u32 phy_delay:2;
u32 contender:1; u32 contender:1;
u32 power_class:3; u32 power_class:3;
/* byte */ /* byte */
u32 port0:2; u32 port0:2;
u32 port1:2; u32 port1:2;
u32 port2:2; u32 port2:2;
u32 initiated_reset:1; u32 initiated_reset:1;
u32 more_packets:1; u32 more_packets:1;
} __attribute__((packed)); } __attribute__((packed));
struct ext_selfid { struct ext_selfid {
u32 packet_identifier:2; /* always binary 10 */ u32 packet_identifier:2; /* always binary 10 */
u32 phy_id:6; u32 phy_id:6;
/* byte */ /* byte */
u32 extended:1; /* if false is struct selfid */ u32 extended:1; /* if false is struct selfid */
u32 seq_nr:3; u32 seq_nr:3;
u32 reserved:2; u32 reserved:2;
u32 porta:2; u32 porta:2;
/* byte */ /* byte */
u32 portb:2; u32 portb:2;
u32 portc:2; u32 portc:2;
u32 portd:2; u32 portd:2;
u32 porte:2; u32 porte:2;
/* byte */ /* byte */
u32 portf:2; u32 portf:2;
u32 portg:2; u32 portg:2;
u32 porth:2; u32 porth:2;
u32 reserved2:1; u32 reserved2:1;
u32 more_packets:1; u32 more_packets:1;
} __attribute__((packed)); } __attribute__((packed));
#elif defined __LITTLE_ENDIAN_BITFIELD /* __BIG_ENDIAN_BITFIELD */ #elif defined __LITTLE_ENDIAN_BITFIELD /* __BIG_ENDIAN_BITFIELD */
@ -171,49 +172,48 @@ struct ext_selfid {
*/ */
struct selfid { struct selfid {
u32 phy_id:6; u32 phy_id:6;
u32 packet_identifier:2; /* always binary 10 */ u32 packet_identifier:2; /* always binary 10 */
/* byte */ /* byte */
u32 gap_count:6; u32 gap_count:6;
u32 link_active:1; u32 link_active:1;
u32 extended:1; /* if true is struct ext_selfid */ u32 extended:1; /* if true is struct ext_selfid */
/* byte */ /* byte */
u32 power_class:3; u32 power_class:3;
u32 contender:1; u32 contender:1;
u32 phy_delay:2; u32 phy_delay:2;
u32 speed:2; u32 speed:2;
/* byte */ /* byte */
u32 more_packets:1; u32 more_packets:1;
u32 initiated_reset:1; u32 initiated_reset:1;
u32 port2:2; u32 port2:2;
u32 port1:2; u32 port1:2;
u32 port0:2; u32 port0:2;
} __attribute__((packed)); } __attribute__((packed));
struct ext_selfid { struct ext_selfid {
u32 phy_id:6; u32 phy_id:6;
u32 packet_identifier:2; /* always binary 10 */ u32 packet_identifier:2; /* always binary 10 */
/* byte */ /* byte */
u32 porta:2; u32 porta:2;
u32 reserved:2; u32 reserved:2;
u32 seq_nr:3; u32 seq_nr:3;
u32 extended:1; /* if false is struct selfid */ u32 extended:1; /* if false is struct selfid */
/* byte */ /* byte */
u32 porte:2; u32 porte:2;
u32 portd:2; u32 portd:2;
u32 portc:2; u32 portc:2;
u32 portb:2; u32 portb:2;
/* byte */ /* byte */
u32 more_packets:1; u32 more_packets:1;
u32 reserved2:1; u32 reserved2:1;
u32 porth:2; u32 porth:2;
u32 portg:2; u32 portg:2;
u32 portf:2; u32 portf:2;
} __attribute__((packed)); } __attribute__((packed));
#else #else
#error What? PDP endian? #error What? PDP endian?
#endif /* __BIG_ENDIAN_BITFIELD */ #endif /* __BIG_ENDIAN_BITFIELD */
#endif /* _IEEE1394_IEEE1394_H */ #endif /* _IEEE1394_IEEE1394_H */
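The two bitfield layouts above exist so the core can overlay a struct directly on a self-ID quadlet once that quadlet is in host byte order. As a purely illustrative sketch (the function name and the message are invented here and are not part of this header; the usual ieee1394 headers and <linux/string.h> are assumed to be included), decoding one non-extended self-ID could look like:

static void report_selfid(quadlet_t quad)
{
	struct selfid s;

	/* quad is assumed to be in host byte order already */
	memcpy(&s, &quad, sizeof(s));

	if (s.packet_identifier != 2)	/* self-ID packets start with binary 10 */
		return;
	if (s.extended)
		return;			/* would be decoded as struct ext_selfid */

	HPSB_DEBUG("phy %d: link %d gap %d speed %d contender %d ports %d/%d/%d",
		   s.phy_id, s.link_active, s.gap_count, s.speed,
		   s.contender, s.port0, s.port1, s.port2);
}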

View file

@ -35,7 +35,6 @@
#include <linux/kthread.h> #include <linux/kthread.h>
#include <asm/byteorder.h> #include <asm/byteorder.h>
#include <asm/semaphore.h>
#include "ieee1394_types.h" #include "ieee1394_types.h"
#include "ieee1394.h" #include "ieee1394.h"
@ -86,7 +85,7 @@ static void dump_packet(const char *text, quadlet_t *data, int size, int speed)
printk("\n"); printk("\n");
} }
#else #else
#define dump_packet(a,b,c,d) #define dump_packet(a,b,c,d) do {} while (0)
#endif #endif
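The point of expanding the disabled dump_packet() to do {} while (0) instead of to nothing is that the no-op keeps the grammar of a real function call: it is exactly one statement and it still requires the trailing semicolon. A stand-alone userspace illustration (macro names invented here):

#include <stdio.h>

#define DBG_EMPTY(x)			/* old style: expands to nothing        */
#define DBG_SAFE(x)  do {} while (0)	/* new style: one genuine empty statement */

int main(void)
{
	int verbose = 0;

	if (verbose)
		DBG_SAFE("hello");	/* still one statement, no -Wempty-body warning */
	else
		puts("quiet");

	/* With DBG_EMPTY, a forgotten semicolon such as
	 *	DBG_EMPTY("oops")
	 *	return 0;
	 * compiles silently; with DBG_SAFE the same slip is a syntax error,
	 * so debug-off builds keep the same grammar as debug-on builds. */
	return 0;
}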
static void abort_requests(struct hpsb_host *host); static void abort_requests(struct hpsb_host *host);
@ -355,10 +354,12 @@ static void build_speed_map(struct hpsb_host *host, int nodecount)
} }
} }
#if SELFID_SPEED_UNKNOWN != IEEE1394_SPEED_MAX
/* assume maximum speed for 1394b PHYs, nodemgr will correct it */ /* assume maximum speed for 1394b PHYs, nodemgr will correct it */
for (n = 0; n < nodecount; n++) for (n = 0; n < nodecount; n++)
if (speedcap[n] == 3) if (speedcap[n] == SELFID_SPEED_UNKNOWN)
speedcap[n] = IEEE1394_SPEED_MAX; speedcap[n] = IEEE1394_SPEED_MAX;
#endif
} }
@ -1169,7 +1170,7 @@ static void __exit ieee1394_cleanup(void)
unregister_chrdev_region(IEEE1394_CORE_DEV, 256); unregister_chrdev_region(IEEE1394_CORE_DEV, 256);
} }
module_init(ieee1394_init); fs_initcall(ieee1394_init); /* same as ohci1394 */
module_exit(ieee1394_cleanup); module_exit(ieee1394_cleanup);
/* Exported symbols */ /* Exported symbols */

View file

@ -1,12 +1,15 @@
#ifndef _IEEE1394_CORE_H #ifndef _IEEE1394_CORE_H
#define _IEEE1394_CORE_H #define _IEEE1394_CORE_H
#include <linux/slab.h> #include <linux/device.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include <asm/atomic.h> #include <asm/atomic.h>
#include <asm/semaphore.h>
#include "hosts.h"
#include "hosts.h"
#include "ieee1394_types.h"
struct hpsb_packet { struct hpsb_packet {
/* This struct is basically read-only for hosts with the exception of /* This struct is basically read-only for hosts with the exception of
@ -58,7 +61,6 @@ struct hpsb_packet {
size_t header_size; size_t header_size;
size_t data_size; size_t data_size;
struct hpsb_host *host; struct hpsb_host *host;
unsigned int generation; unsigned int generation;
@ -80,7 +82,7 @@ struct hpsb_packet {
/* Set a task for when a packet completes */ /* Set a task for when a packet completes */
void hpsb_set_packet_complete_task(struct hpsb_packet *packet, void hpsb_set_packet_complete_task(struct hpsb_packet *packet,
void (*routine)(void *), void *data); void (*routine)(void *), void *data);
static inline struct hpsb_packet *driver_packet(struct list_head *l) static inline struct hpsb_packet *driver_packet(struct list_head *l)
{ {
@ -92,7 +94,6 @@ void abort_timedouts(unsigned long __opaque);
struct hpsb_packet *hpsb_alloc_packet(size_t data_size); struct hpsb_packet *hpsb_alloc_packet(size_t data_size);
void hpsb_free_packet(struct hpsb_packet *packet); void hpsb_free_packet(struct hpsb_packet *packet);
/* /*
* Generation counter for the complete 1394 subsystem. Generation gets * Generation counter for the complete 1394 subsystem. Generation gets
* incremented on every change in the subsystem (e.g. bus reset). * incremented on every change in the subsystem (e.g. bus reset).
@ -204,10 +205,14 @@ void hpsb_packet_received(struct hpsb_host *host, quadlet_t *data, size_t size,
#define IEEE1394_MINOR_BLOCK_EXPERIMENTAL 15 #define IEEE1394_MINOR_BLOCK_EXPERIMENTAL 15
#define IEEE1394_CORE_DEV MKDEV(IEEE1394_MAJOR, 0) #define IEEE1394_CORE_DEV MKDEV(IEEE1394_MAJOR, 0)
#define IEEE1394_RAW1394_DEV MKDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_RAW1394 * 16) #define IEEE1394_RAW1394_DEV MKDEV(IEEE1394_MAJOR, \
#define IEEE1394_VIDEO1394_DEV MKDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_VIDEO1394 * 16) IEEE1394_MINOR_BLOCK_RAW1394 * 16)
#define IEEE1394_DV1394_DEV MKDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_DV1394 * 16) #define IEEE1394_VIDEO1394_DEV MKDEV(IEEE1394_MAJOR, \
#define IEEE1394_EXPERIMENTAL_DEV MKDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_EXPERIMENTAL * 16) IEEE1394_MINOR_BLOCK_VIDEO1394 * 16)
#define IEEE1394_DV1394_DEV MKDEV(IEEE1394_MAJOR, \
IEEE1394_MINOR_BLOCK_DV1394 * 16)
#define IEEE1394_EXPERIMENTAL_DEV MKDEV(IEEE1394_MAJOR, \
IEEE1394_MINOR_BLOCK_EXPERIMENTAL * 16)
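The * 16 in these MKDEV() wrappers reflects the layout of the shared ieee1394 character major: each driver owns a block of 16 minors, and ieee1394_file_to_instance() below returns the index inside that block. A small, purely illustrative helper (not part of the header) that spells the arithmetic out:

#include <linux/kdev_t.h>

static void minor_layout_example(void)
{
	/* fourth dv1394 instance: block IEEE1394_MINOR_BLOCK_DV1394, index 3 */
	dev_t dev = MKDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_DV1394 * 16 + 3);

	printk(KERN_INFO "major %u minor %u -> block %u, instance %u\n",
	       MAJOR(dev), MINOR(dev), MINOR(dev) / 16, MINOR(dev) % 16);
}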
/* return the index (within a minor number block) of a file */ /* return the index (within a minor number block) of a file */
static inline unsigned char ieee1394_file_to_instance(struct file *file) static inline unsigned char ieee1394_file_to_instance(struct file *file)
@ -223,4 +228,3 @@ extern struct class hpsb_host_class;
extern struct class *hpsb_protocol_class; extern struct class *hpsb_protocol_class;
#endif /* _IEEE1394_CORE_H */ #endif /* _IEEE1394_CORE_H */

View file

@ -1,33 +1,19 @@
#ifndef _IEEE1394_HOTPLUG_H #ifndef _IEEE1394_HOTPLUG_H
#define _IEEE1394_HOTPLUG_H #define _IEEE1394_HOTPLUG_H
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mod_devicetable.h>
/* Unit spec id and sw version entry for some protocols */ /* Unit spec id and sw version entry for some protocols */
#define AVC_UNIT_SPEC_ID_ENTRY 0x0000A02D #define AVC_UNIT_SPEC_ID_ENTRY 0x0000A02D
#define AVC_SW_VERSION_ENTRY 0x00010001 #define AVC_SW_VERSION_ENTRY 0x00010001
#define CAMERA_UNIT_SPEC_ID_ENTRY 0x0000A02D #define CAMERA_UNIT_SPEC_ID_ENTRY 0x0000A02D
#define CAMERA_SW_VERSION_ENTRY 0x00000100 #define CAMERA_SW_VERSION_ENTRY 0x00000100
/* Check to make sure this all isn't already defined */ /* /include/linux/mod_devicetable.h defines:
#ifndef IEEE1394_MATCH_VENDOR_ID * IEEE1394_MATCH_VENDOR_ID
* IEEE1394_MATCH_MODEL_ID
#define IEEE1394_MATCH_VENDOR_ID 0x0001 * IEEE1394_MATCH_SPECIFIER_ID
#define IEEE1394_MATCH_MODEL_ID 0x0002 * IEEE1394_MATCH_VERSION
#define IEEE1394_MATCH_SPECIFIER_ID 0x0004 * struct ieee1394_device_id
#define IEEE1394_MATCH_VERSION 0x0008 */
#include <linux/mod_devicetable.h>
struct ieee1394_device_id {
u32 match_flags;
u32 vendor_id;
u32 model_id;
u32 specifier_id;
u32 version;
void *driver_data;
};
#endif
#endif /* _IEEE1394_HOTPLUG_H */ #endif /* _IEEE1394_HOTPLUG_H */

View file

@ -9,19 +9,17 @@
* directory of the kernel sources for details. * directory of the kernel sources for details.
*/ */
#include <linux/sched.h>
#include <linux/bitops.h> #include <linux/bitops.h>
#include <linux/smp_lock.h> #include <linux/spinlock.h>
#include <linux/interrupt.h> #include <linux/wait.h>
#include <asm/bug.h>
#include <asm/errno.h> #include <asm/errno.h>
#include "ieee1394.h" #include "ieee1394.h"
#include "ieee1394_types.h" #include "ieee1394_types.h"
#include "hosts.h" #include "hosts.h"
#include "ieee1394_core.h" #include "ieee1394_core.h"
#include "highlevel.h"
#include "nodemgr.h"
#include "ieee1394_transactions.h" #include "ieee1394_transactions.h"
#define PREP_ASYNC_HEAD_ADDRESS(tc) \ #define PREP_ASYNC_HEAD_ADDRESS(tc) \
@ -31,6 +29,13 @@
packet->header[1] = (packet->host->node_id << 16) | (addr >> 32); \ packet->header[1] = (packet->host->node_id << 16) | (addr >> 32); \
packet->header[2] = addr & 0xffffffff packet->header[2] = addr & 0xffffffff
#ifndef HPSB_DEBUG_TLABELS
static
#endif
spinlock_t hpsb_tlabel_lock = SPIN_LOCK_UNLOCKED;
static DECLARE_WAIT_QUEUE_HEAD(tlabel_wq);
static void fill_async_readquad(struct hpsb_packet *packet, u64 addr) static void fill_async_readquad(struct hpsb_packet *packet, u64 addr)
{ {
PREP_ASYNC_HEAD_ADDRESS(TCODE_READQ); PREP_ASYNC_HEAD_ADDRESS(TCODE_READQ);
@ -114,9 +119,41 @@ static void fill_async_stream_packet(struct hpsb_packet *packet, int length,
packet->tcode = TCODE_ISO_DATA; packet->tcode = TCODE_ISO_DATA;
} }
/* same as hpsb_get_tlabel, except that it returns immediately */
static int hpsb_get_tlabel_atomic(struct hpsb_packet *packet)
{
unsigned long flags, *tp;
u8 *next;
int tlabel, n = NODEID_TO_NODE(packet->node_id);
/* Broadcast transactions are complete once the request has been sent.
* Use the same transaction label for all broadcast transactions. */
if (unlikely(n == ALL_NODES)) {
packet->tlabel = 0;
return 0;
}
tp = packet->host->tl_pool[n].map;
next = &packet->host->next_tl[n];
spin_lock_irqsave(&hpsb_tlabel_lock, flags);
tlabel = find_next_zero_bit(tp, 64, *next);
if (tlabel > 63)
tlabel = find_first_zero_bit(tp, 64);
if (tlabel > 63) {
spin_unlock_irqrestore(&hpsb_tlabel_lock, flags);
return -EAGAIN;
}
__set_bit(tlabel, tp);
*next = (tlabel + 1) & 63;
spin_unlock_irqrestore(&hpsb_tlabel_lock, flags);
packet->tlabel = tlabel;
return 0;
}
/** /**
* hpsb_get_tlabel - allocate a transaction label * hpsb_get_tlabel - allocate a transaction label
* @packet: the packet who's tlabel/tpool we set * @packet: the packet whose tlabel and tl_pool we set
* *
* Every asynchronous transaction on the 1394 bus needs a transaction * Every asynchronous transaction on the 1394 bus needs a transaction
* label to match the response to the request. This label has to be * label to match the response to the request. This label has to be
@ -130,42 +167,25 @@ static void fill_async_stream_packet(struct hpsb_packet *packet, int length,
* Return value: Zero on success, otherwise non-zero. A non-zero return * Return value: Zero on success, otherwise non-zero. A non-zero return
* generally means there are no available tlabels. If this is called out * generally means there are no available tlabels. If this is called out
 * of interrupt or atomic context, then it will sleep until it can return a * of interrupt or atomic context, then it will sleep until it can return a
* tlabel. * tlabel or a signal is received.
*/ */
int hpsb_get_tlabel(struct hpsb_packet *packet) int hpsb_get_tlabel(struct hpsb_packet *packet)
{ {
unsigned long flags; if (irqs_disabled() || in_atomic())
struct hpsb_tlabel_pool *tp; return hpsb_get_tlabel_atomic(packet);
int n = NODEID_TO_NODE(packet->node_id);
if (unlikely(n == ALL_NODES)) /* NB: The macro wait_event_interruptible() is called with a condition
return 0; * argument with side effect. This is only possible because the side
tp = &packet->host->tpool[n]; * effect does not occur until the condition became true, and
* wait_event_interruptible() won't evaluate the condition again after
if (irqs_disabled() || in_atomic()) { * that. */
if (down_trylock(&tp->count)) return wait_event_interruptible(tlabel_wq,
return 1; !hpsb_get_tlabel_atomic(packet));
} else {
down(&tp->count);
}
spin_lock_irqsave(&tp->lock, flags);
packet->tlabel = find_next_zero_bit(tp->pool, 64, tp->next);
if (packet->tlabel > 63)
packet->tlabel = find_first_zero_bit(tp->pool, 64);
tp->next = (packet->tlabel + 1) % 64;
/* Should _never_ happen */
BUG_ON(test_and_set_bit(packet->tlabel, tp->pool));
tp->allocations++;
spin_unlock_irqrestore(&tp->lock, flags);
return 0;
} }
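Stripped of the locking and the waitqueue, the per-node policy implemented by hpsb_get_tlabel_atomic() is a 64-bit busy map walked round-robin from a cursor. A minimal userspace model of just that policy, for illustration only (the struct and function names below are invented, not the kernel's):

#include <stdint.h>
#include <stdio.h>

struct tlabel_pool {
	uint64_t map;		/* bit n set means tlabel n is in use */
	unsigned next;		/* round-robin starting point          */
};

/* returns 0..63 on success, -1 if all 64 labels are busy */
static int tlabel_alloc(struct tlabel_pool *p)
{
	unsigned i;

	for (i = 0; i < 64; i++) {
		unsigned t = (p->next + i) & 63;

		if (!(p->map & ((uint64_t)1 << t))) {
			p->map |= (uint64_t)1 << t;
			p->next = (t + 1) & 63;
			return (int)t;
		}
	}
	return -1;	/* the kernel caller sleeps on tlabel_wq instead */
}

static void tlabel_free(struct tlabel_pool *p, int t)
{
	p->map &= ~((uint64_t)1 << t);	/* the kernel side then wakes tlabel_wq */
}

int main(void)
{
	struct tlabel_pool pool = { 0, 0 };
	int a = tlabel_alloc(&pool);
	int b = tlabel_alloc(&pool);

	printf("first two labels: %d %d\n", a, b);	 /* 0 1 */
	tlabel_free(&pool, a);
	printf("next label: %d\n", tlabel_alloc(&pool)); /* 2: cursor moves on, 0 is reused later */
	return 0;
}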
/** /**
* hpsb_free_tlabel - free an allocated transaction label * hpsb_free_tlabel - free an allocated transaction label
* @packet: packet whos tlabel/tpool needs to be cleared * @packet: packet whose tlabel and tl_pool needs to be cleared
* *
* Frees the transaction label allocated with hpsb_get_tlabel(). The * Frees the transaction label allocated with hpsb_get_tlabel(). The
* tlabel has to be freed after the transaction is complete (i.e. response * tlabel has to be freed after the transaction is complete (i.e. response
@ -176,21 +196,20 @@ int hpsb_get_tlabel(struct hpsb_packet *packet)
*/ */
void hpsb_free_tlabel(struct hpsb_packet *packet) void hpsb_free_tlabel(struct hpsb_packet *packet)
{ {
unsigned long flags; unsigned long flags, *tp;
struct hpsb_tlabel_pool *tp; int tlabel, n = NODEID_TO_NODE(packet->node_id);
int n = NODEID_TO_NODE(packet->node_id);
if (unlikely(n == ALL_NODES)) if (unlikely(n == ALL_NODES))
return; return;
tp = &packet->host->tpool[n]; tp = packet->host->tl_pool[n].map;
tlabel = packet->tlabel;
BUG_ON(tlabel > 63 || tlabel < 0);
BUG_ON(packet->tlabel > 63 || packet->tlabel < 0); spin_lock_irqsave(&hpsb_tlabel_lock, flags);
BUG_ON(!__test_and_clear_bit(tlabel, tp));
spin_unlock_irqrestore(&hpsb_tlabel_lock, flags);
spin_lock_irqsave(&tp->lock, flags); wake_up_interruptible(&tlabel_wq);
BUG_ON(!test_and_clear_bit(packet->tlabel, tp->pool));
spin_unlock_irqrestore(&tp->lock, flags);
up(&tp->count);
} }
int hpsb_packet_success(struct hpsb_packet *packet) int hpsb_packet_success(struct hpsb_packet *packet)
@ -214,7 +233,7 @@ int hpsb_packet_success(struct hpsb_packet *packet)
packet->node_id); packet->node_id);
return -EAGAIN; return -EAGAIN;
} }
HPSB_PANIC("reached unreachable code 1 in %s", __FUNCTION__); BUG();
case ACK_BUSY_X: case ACK_BUSY_X:
case ACK_BUSY_A: case ACK_BUSY_A:
@ -261,8 +280,7 @@ int hpsb_packet_success(struct hpsb_packet *packet)
packet->ack_code, packet->node_id, packet->tcode); packet->ack_code, packet->node_id, packet->tcode);
return -EAGAIN; return -EAGAIN;
} }
BUG();
HPSB_PANIC("reached unreachable code 2 in %s", __FUNCTION__);
} }
struct hpsb_packet *hpsb_make_readpacket(struct hpsb_host *host, nodeid_t node, struct hpsb_packet *hpsb_make_readpacket(struct hpsb_host *host, nodeid_t node,

View file

@ -1,32 +1,32 @@
#ifndef _IEEE1394_TRANSACTIONS_H #ifndef _IEEE1394_TRANSACTIONS_H
#define _IEEE1394_TRANSACTIONS_H #define _IEEE1394_TRANSACTIONS_H
#include "ieee1394_core.h" #include <linux/types.h>
#include "ieee1394_types.h"
struct hpsb_packet;
struct hpsb_host;
/*
* Get and free transaction labels.
*/
int hpsb_get_tlabel(struct hpsb_packet *packet); int hpsb_get_tlabel(struct hpsb_packet *packet);
void hpsb_free_tlabel(struct hpsb_packet *packet); void hpsb_free_tlabel(struct hpsb_packet *packet);
struct hpsb_packet *hpsb_make_readpacket(struct hpsb_host *host, nodeid_t node, struct hpsb_packet *hpsb_make_readpacket(struct hpsb_host *host, nodeid_t node,
u64 addr, size_t length); u64 addr, size_t length);
struct hpsb_packet *hpsb_make_lockpacket(struct hpsb_host *host, nodeid_t node, struct hpsb_packet *hpsb_make_lockpacket(struct hpsb_host *host, nodeid_t node,
u64 addr, int extcode, quadlet_t *data, u64 addr, int extcode, quadlet_t *data,
quadlet_t arg); quadlet_t arg);
struct hpsb_packet *hpsb_make_lock64packet(struct hpsb_host *host, nodeid_t node, struct hpsb_packet *hpsb_make_lock64packet(struct hpsb_host *host,
u64 addr, int extcode, octlet_t *data, nodeid_t node, u64 addr, int extcode,
octlet_t arg); octlet_t *data, octlet_t arg);
struct hpsb_packet *hpsb_make_phypacket(struct hpsb_host *host, struct hpsb_packet *hpsb_make_phypacket(struct hpsb_host *host, quadlet_t data);
quadlet_t data) ; struct hpsb_packet *hpsb_make_isopacket(struct hpsb_host *host, int length,
struct hpsb_packet *hpsb_make_isopacket(struct hpsb_host *host, int channel, int tag, int sync);
int length, int channel, struct hpsb_packet *hpsb_make_writepacket(struct hpsb_host *host,
int tag, int sync); nodeid_t node, u64 addr,
struct hpsb_packet *hpsb_make_writepacket (struct hpsb_host *host, nodeid_t node, quadlet_t *buffer, size_t length);
u64 addr, quadlet_t *buffer, size_t length);
struct hpsb_packet *hpsb_make_streampacket(struct hpsb_host *host, u8 *buffer, struct hpsb_packet *hpsb_make_streampacket(struct hpsb_host *host, u8 *buffer,
int length, int channel, int tag, int sync); int length, int channel, int tag,
int sync);
/* /*
* hpsb_packet_success - Make sense of the ack and reply codes and * hpsb_packet_success - Make sense of the ack and reply codes and
@ -40,9 +40,8 @@ struct hpsb_packet *hpsb_make_streampacket(struct hpsb_host *host, u8 *buffer,
*/ */
int hpsb_packet_success(struct hpsb_packet *packet); int hpsb_packet_success(struct hpsb_packet *packet);
/* /*
* The generic read, write and lock functions. All recognize the local node ID * The generic read and write functions. All recognize the local node ID
* and act accordingly. Read and write automatically use quadlet commands if * and act accordingly. Read and write automatically use quadlet commands if
 * length == 4 and block commands otherwise (however, they do not yet * length == 4 and block commands otherwise (however, they do not yet
 * support lengths that are not a multiple of 4). You must explicitly specify * support lengths that are not a multiple of 4). You must explicitly specify
@ -54,4 +53,8 @@ int hpsb_read(struct hpsb_host *host, nodeid_t node, unsigned int generation,
int hpsb_write(struct hpsb_host *host, nodeid_t node, unsigned int generation, int hpsb_write(struct hpsb_host *host, nodeid_t node, unsigned int generation,
u64 addr, quadlet_t *buffer, size_t length); u64 addr, quadlet_t *buffer, size_t length);
#ifdef HPSB_DEBUG_TLABELS
extern spinlock_t hpsb_tlabel_lock;
#endif
#endif /* _IEEE1394_TRANSACTIONS_H */ #endif /* _IEEE1394_TRANSACTIONS_H */

View file

@ -1,37 +1,11 @@
#ifndef _IEEE1394_TYPES_H #ifndef _IEEE1394_TYPES_H
#define _IEEE1394_TYPES_H #define _IEEE1394_TYPES_H
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/string.h> #include <linux/string.h>
#include <linux/types.h>
#include <asm/semaphore.h>
#include <asm/byteorder.h> #include <asm/byteorder.h>
/* Transaction Label handling */
struct hpsb_tlabel_pool {
DECLARE_BITMAP(pool, 64);
spinlock_t lock;
u8 next;
u32 allocations;
struct semaphore count;
};
#define HPSB_TPOOL_INIT(_tp) \
do { \
bitmap_zero((_tp)->pool, 64); \
spin_lock_init(&(_tp)->lock); \
(_tp)->next = 0; \
(_tp)->allocations = 0; \
sema_init(&(_tp)->count, 63); \
} while (0)
typedef u32 quadlet_t; typedef u32 quadlet_t;
typedef u64 octlet_t; typedef u64 octlet_t;
typedef u16 nodeid_t; typedef u16 nodeid_t;
@ -54,46 +28,40 @@ typedef u16 arm_length_t;
#define NODE_BUS_ARGS(__host, __nodeid) \ #define NODE_BUS_ARGS(__host, __nodeid) \
__host->id, NODEID_TO_NODE(__nodeid), NODEID_TO_BUS(__nodeid) __host->id, NODEID_TO_NODE(__nodeid), NODEID_TO_BUS(__nodeid)
#define HPSB_PRINT(level, fmt, args...) printk(level "ieee1394: " fmt "\n" , ## args) #define HPSB_PRINT(level, fmt, args...) \
printk(level "ieee1394: " fmt "\n" , ## args)
#define HPSB_DEBUG(fmt, args...) HPSB_PRINT(KERN_DEBUG, fmt , ## args) #define HPSB_DEBUG(fmt, args...) HPSB_PRINT(KERN_DEBUG, fmt , ## args)
#define HPSB_INFO(fmt, args...) HPSB_PRINT(KERN_INFO, fmt , ## args) #define HPSB_INFO(fmt, args...) HPSB_PRINT(KERN_INFO, fmt , ## args)
#define HPSB_NOTICE(fmt, args...) HPSB_PRINT(KERN_NOTICE, fmt , ## args) #define HPSB_NOTICE(fmt, args...) HPSB_PRINT(KERN_NOTICE, fmt , ## args)
#define HPSB_WARN(fmt, args...) HPSB_PRINT(KERN_WARNING, fmt , ## args) #define HPSB_WARN(fmt, args...) HPSB_PRINT(KERN_WARNING, fmt , ## args)
#define HPSB_ERR(fmt, args...) HPSB_PRINT(KERN_ERR, fmt , ## args) #define HPSB_ERR(fmt, args...) HPSB_PRINT(KERN_ERR, fmt , ## args)
#ifdef CONFIG_IEEE1394_VERBOSEDEBUG #ifdef CONFIG_IEEE1394_VERBOSEDEBUG
#define HPSB_VERBOSE(fmt, args...) HPSB_PRINT(KERN_DEBUG, fmt , ## args) #define HPSB_VERBOSE(fmt, args...) HPSB_PRINT(KERN_DEBUG, fmt , ## args)
#define HPSB_DEBUG_TLABELS
#else #else
#define HPSB_VERBOSE(fmt, args...) #define HPSB_VERBOSE(fmt, args...) do {} while (0)
#endif #endif
#define HPSB_PANIC(fmt, args...) panic("ieee1394: " fmt "\n" , ## args)
#define HPSB_TRACE() HPSB_PRINT(KERN_INFO, "TRACE - %s, %s(), line %d", __FILE__, __FUNCTION__, __LINE__)
#ifdef __BIG_ENDIAN #ifdef __BIG_ENDIAN
static __inline__ void *memcpy_le32(u32 *dest, const u32 *__src, size_t count) static inline void *memcpy_le32(u32 *dest, const u32 *__src, size_t count)
{ {
void *tmp = dest; void *tmp = dest;
u32 *src = (u32 *)__src; u32 *src = (u32 *)__src;
count /= 4; count /= 4;
while (count--)
while (count--) { *dest++ = swab32p(src++);
*dest++ = swab32p(src++); return tmp;
}
return tmp;
} }
#else #else
static __inline__ void *memcpy_le32(u32 *dest, const u32 *src, size_t count) static __inline__ void *memcpy_le32(u32 *dest, const u32 *src, size_t count)
{ {
return memcpy(dest, src, count); return memcpy(dest, src, count);
} }
#endif /* __BIG_ENDIAN */ #endif /* __BIG_ENDIAN */

View file

@ -9,8 +9,11 @@
* directory of the kernel sources for details. * directory of the kernel sources for details.
*/ */
#include <linux/slab.h> #include <linux/pci.h>
#include <linux/sched.h> #include <linux/sched.h>
#include <linux/slab.h>
#include "hosts.h"
#include "iso.h" #include "iso.h"
void hpsb_iso_stop(struct hpsb_iso *iso) void hpsb_iso_stop(struct hpsb_iso *iso)

View file

@ -12,33 +12,40 @@
#ifndef IEEE1394_ISO_H #ifndef IEEE1394_ISO_H
#define IEEE1394_ISO_H #define IEEE1394_ISO_H
#include "hosts.h" #include <linux/spinlock_types.h>
#include <asm/atomic.h>
#include <asm/types.h>
#include "dma.h" #include "dma.h"
struct hpsb_host;
/* high-level ISO interface */ /* high-level ISO interface */
/* This API sends and receives isochronous packets on a large, /*
virtually-contiguous kernel memory buffer. The buffer may be mapped * This API sends and receives isochronous packets on a large,
into a user-space process for zero-copy transmission and reception. * virtually-contiguous kernel memory buffer. The buffer may be mapped
* into a user-space process for zero-copy transmission and reception.
There are no explicit boundaries between packets in the buffer. A *
packet may be transmitted or received at any location. However, * There are no explicit boundaries between packets in the buffer. A
low-level drivers may impose certain restrictions on alignment or * packet may be transmitted or received at any location. However,
size of packets. (e.g. in OHCI no packet may cross a page boundary, * low-level drivers may impose certain restrictions on alignment or
and packets should be quadlet-aligned) * size of packets. (e.g. in OHCI no packet may cross a page boundary,
*/ * and packets should be quadlet-aligned)
*/
/* Packet descriptor - the API maintains a ring buffer of these packet /* Packet descriptor - the API maintains a ring buffer of these packet
descriptors in kernel memory (hpsb_iso.infos[]). */ * descriptors in kernel memory (hpsb_iso.infos[]). */
struct hpsb_iso_packet_info { struct hpsb_iso_packet_info {
/* offset of data payload relative to the first byte of the buffer */ /* offset of data payload relative to the first byte of the buffer */
__u32 offset; __u32 offset;
/* length of the data payload, in bytes (not including the isochronous header) */ /* length of the data payload, in bytes (not including the isochronous
* header) */
__u16 len; __u16 len;
/* (recv only) the cycle number (mod 8000) on which the packet was received */ /* (recv only) the cycle number (mod 8000) on which the packet was
* received */
__u16 cycle; __u16 cycle;
/* (recv only) channel on which the packet was received */ /* (recv only) channel on which the packet was received */
@ -48,12 +55,10 @@ struct hpsb_iso_packet_info {
__u8 tag; __u8 tag;
__u8 sy; __u8 sy;
/* /* length in bytes of the packet including header/trailer.
* length in bytes of the packet including header/trailer. * MUST be at structure end, since the first part of this structure is
* MUST be at structure end, since the first part of this structure is also * also defined in raw1394.h (i.e. struct raw1394_iso_packet_info), is
* defined in raw1394.h (i.e. struct raw1394_iso_packet_info), is copied to * copied to userspace and is accessed there through libraw1394. */
* userspace and is accessed there through libraw1394.
*/
__u16 total_len; __u16 total_len;
}; };
@ -75,8 +80,8 @@ struct hpsb_iso {
void *hostdata; void *hostdata;
/* a function to be called (from interrupt context) after /* a function to be called (from interrupt context) after
outgoing packets have been sent, or incoming packets have * outgoing packets have been sent, or incoming packets have
arrived */ * arrived */
void (*callback)(struct hpsb_iso*); void (*callback)(struct hpsb_iso*);
/* wait for buffer space */ /* wait for buffer space */
@ -88,7 +93,7 @@ struct hpsb_iso {
/* greatest # of packets between interrupts - controls /* greatest # of packets between interrupts - controls
the maximum latency of the buffer */ * the maximum latency of the buffer */
int irq_interval; int irq_interval;
/* the buffer for packet data payloads */ /* the buffer for packet data payloads */
@ -112,8 +117,8 @@ struct hpsb_iso {
int pkt_dma; int pkt_dma;
/* how many packets, starting at first_packet: /* how many packets, starting at first_packet:
(transmit) are ready to be filled with data * (transmit) are ready to be filled with data
(receive) contain received data */ * (receive) contain received data */
int n_ready_packets; int n_ready_packets;
/* how many times the buffer has overflowed or underflowed */ /* how many times the buffer has overflowed or underflowed */
@ -134,7 +139,7 @@ struct hpsb_iso {
int start_cycle; int start_cycle;
/* cycle at which next packet will be transmitted, /* cycle at which next packet will be transmitted,
-1 if not known */ * -1 if not known */
int xmit_cycle; int xmit_cycle;
/* ringbuffer of packet descriptors in regular kernel memory /* ringbuffer of packet descriptors in regular kernel memory
@ -170,25 +175,30 @@ int hpsb_iso_recv_unlisten_channel(struct hpsb_iso *iso, unsigned char channel);
int hpsb_iso_recv_set_channel_mask(struct hpsb_iso *iso, u64 mask); int hpsb_iso_recv_set_channel_mask(struct hpsb_iso *iso, u64 mask);
/* start/stop DMA */ /* start/stop DMA */
int hpsb_iso_xmit_start(struct hpsb_iso *iso, int start_on_cycle, int prebuffer); int hpsb_iso_xmit_start(struct hpsb_iso *iso, int start_on_cycle,
int hpsb_iso_recv_start(struct hpsb_iso *iso, int start_on_cycle, int tag_mask, int sync); int prebuffer);
int hpsb_iso_recv_start(struct hpsb_iso *iso, int start_on_cycle,
int tag_mask, int sync);
void hpsb_iso_stop(struct hpsb_iso *iso); void hpsb_iso_stop(struct hpsb_iso *iso);
/* deallocate buffer and DMA context */ /* deallocate buffer and DMA context */
void hpsb_iso_shutdown(struct hpsb_iso *iso); void hpsb_iso_shutdown(struct hpsb_iso *iso);
/* queue a packet for transmission. 'offset' is relative to the beginning of the /* queue a packet for transmission.
DMA buffer, where the packet's data payload should already have been placed */ * 'offset' is relative to the beginning of the DMA buffer, where the packet's
int hpsb_iso_xmit_queue_packet(struct hpsb_iso *iso, u32 offset, u16 len, u8 tag, u8 sy); * data payload should already have been placed. */
int hpsb_iso_xmit_queue_packet(struct hpsb_iso *iso, u32 offset, u16 len,
u8 tag, u8 sy);
/* wait until all queued packets have been transmitted to the bus */ /* wait until all queued packets have been transmitted to the bus */
int hpsb_iso_xmit_sync(struct hpsb_iso *iso); int hpsb_iso_xmit_sync(struct hpsb_iso *iso);
/* N packets have been read out of the buffer, re-use the buffer space */ /* N packets have been read out of the buffer, re-use the buffer space */
int hpsb_iso_recv_release_packets(struct hpsb_iso *recv, unsigned int n_packets); int hpsb_iso_recv_release_packets(struct hpsb_iso *recv,
unsigned int n_packets);
/* check for arrival of new packets immediately (even if irq_interval /* check for arrival of new packets immediately (even if irq_interval
has not yet been reached) */ * has not yet been reached) */
int hpsb_iso_recv_flush(struct hpsb_iso *iso); int hpsb_iso_recv_flush(struct hpsb_iso *iso);
/* returns # of packets ready to send or receive */ /* returns # of packets ready to send or receive */
@ -197,14 +207,15 @@ int hpsb_iso_n_ready(struct hpsb_iso *iso);
/* the following are callbacks available to low-level drivers */ /* the following are callbacks available to low-level drivers */
/* call after a packet has been transmitted to the bus (interrupt context is OK) /* call after a packet has been transmitted to the bus (interrupt context is OK)
'cycle' is the _exact_ cycle the packet was sent on * 'cycle' is the _exact_ cycle the packet was sent on
'error' should be non-zero if some sort of error occurred when sending the packet * 'error' should be non-zero if some sort of error occurred when sending the
*/ * packet */
void hpsb_iso_packet_sent(struct hpsb_iso *iso, int cycle, int error); void hpsb_iso_packet_sent(struct hpsb_iso *iso, int cycle, int error);
/* call after a packet has been received (interrupt context OK) */ /* call after a packet has been received (interrupt context OK) */
void hpsb_iso_packet_received(struct hpsb_iso *iso, u32 offset, u16 len, void hpsb_iso_packet_received(struct hpsb_iso *iso, u32 offset, u16 len,
u16 total_len, u16 cycle, u8 channel, u8 tag, u8 sy); u16 total_len, u16 cycle, u8 channel, u8 tag,
u8 sy);
/* call to wake waiting processes after buffer space has opened up. */ /* call to wake waiting processes after buffer space has opened up. */
void hpsb_iso_wake(struct hpsb_iso *iso); void hpsb_iso_wake(struct hpsb_iso *iso);
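To see how the declarations above fit together on the transmit side, here is a hedged sketch of a caller. It assumes the hpsb_iso was already allocated by the init call (not shown in this hunk), that the payloads were already copied into the DMA buffer at quadlet-aligned offsets, and that passing -1 as start_on_cycle means "start as soon as possible"; only functions declared in this header are used, and the wrapper name is invented:

static int xmit_n_packets(struct hpsb_iso *iso, unsigned int n, u16 pkt_len)
{
	unsigned int i;
	u32 offset = 0;
	int ret;

	for (i = 0; i < n; i++) {
		/* payload for packet i assumed to sit at `offset` already */
		ret = hpsb_iso_xmit_queue_packet(iso, offset, pkt_len,
						 0 /* tag */, 0 /* sy */);
		if (ret)
			return ret;
		offset += pkt_len;
	}

	ret = hpsb_iso_xmit_start(iso, -1 /* start_on_cycle, assumed ASAP */,
				  n /* prebuffer */);
	if (ret)
		return ret;

	ret = hpsb_iso_xmit_sync(iso);	/* block until everything went out */
	hpsb_iso_stop(iso);
	return ret;
}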

View file

@ -12,26 +12,23 @@
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/list.h> #include <linux/list.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/kmod.h>
#include <linux/completion.h>
#include <linux/delay.h> #include <linux/delay.h>
#include <linux/pci.h> #include <linux/kthread.h>
#include <linux/moduleparam.h> #include <linux/moduleparam.h>
#include <asm/atomic.h> #include <asm/atomic.h>
#include "ieee1394_types.h" #include "csr.h"
#include "highlevel.h"
#include "hosts.h"
#include "ieee1394.h" #include "ieee1394.h"
#include "ieee1394_core.h" #include "ieee1394_core.h"
#include "hosts.h" #include "ieee1394_hotplug.h"
#include "ieee1394_types.h"
#include "ieee1394_transactions.h" #include "ieee1394_transactions.h"
#include "highlevel.h"
#include "csr.h"
#include "nodemgr.h" #include "nodemgr.h"
static int ignore_drivers; static int ignore_drivers;
module_param(ignore_drivers, int, 0444); module_param(ignore_drivers, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ignore_drivers, "Disable automatic probing for drivers."); MODULE_PARM_DESC(ignore_drivers, "Disable automatic probing for drivers.");
struct nodemgr_csr_info { struct nodemgr_csr_info {
@ -71,7 +68,7 @@ static int nodemgr_check_speed(struct nodemgr_csr_info *ci, u64 addr,
u8 i, *speed, old_speed, good_speed; u8 i, *speed, old_speed, good_speed;
int ret; int ret;
speed = ci->host->speed + NODEID_TO_NODE(ci->nodeid); speed = &(ci->host->speed[NODEID_TO_NODE(ci->nodeid)]);
old_speed = *speed; old_speed = *speed;
good_speed = IEEE1394_SPEED_MAX + 1; good_speed = IEEE1394_SPEED_MAX + 1;
@ -161,16 +158,12 @@ static struct csr1212_bus_ops nodemgr_csr_ops = {
* but now we are much simpler because of the LDM. * but now we are much simpler because of the LDM.
*/ */
static DECLARE_MUTEX(nodemgr_serialize); static DEFINE_MUTEX(nodemgr_serialize);
struct host_info { struct host_info {
struct hpsb_host *host; struct hpsb_host *host;
struct list_head list; struct list_head list;
struct completion exited; struct task_struct *thread;
struct semaphore reset_sem;
int pid;
char daemon_name[15];
int kill_me;
}; };
static int nodemgr_bus_match(struct device * dev, struct device_driver * drv); static int nodemgr_bus_match(struct device * dev, struct device_driver * drv);
@ -334,34 +327,44 @@ static ssize_t fw_show_ne_bus_options(struct device *dev, struct device_attribut
static DEVICE_ATTR(bus_options,S_IRUGO,fw_show_ne_bus_options,NULL); static DEVICE_ATTR(bus_options,S_IRUGO,fw_show_ne_bus_options,NULL);
/* tlabels_free, tlabels_allocations, tlabels_mask are read non-atomically #ifdef HPSB_DEBUG_TLABELS
* here, therefore displayed values may be occasionally wrong. */ static ssize_t fw_show_ne_tlabels_free(struct device *dev,
static ssize_t fw_show_ne_tlabels_free(struct device *dev, struct device_attribute *attr, char *buf) struct device_attribute *attr, char *buf)
{ {
struct node_entry *ne = container_of(dev, struct node_entry, device); struct node_entry *ne = container_of(dev, struct node_entry, device);
return sprintf(buf, "%d\n", 64 - bitmap_weight(ne->tpool->pool, 64)); unsigned long flags;
unsigned long *tp = ne->host->tl_pool[NODEID_TO_NODE(ne->nodeid)].map;
int tf;
spin_lock_irqsave(&hpsb_tlabel_lock, flags);
tf = 64 - bitmap_weight(tp, 64);
spin_unlock_irqrestore(&hpsb_tlabel_lock, flags);
return sprintf(buf, "%d\n", tf);
} }
static DEVICE_ATTR(tlabels_free,S_IRUGO,fw_show_ne_tlabels_free,NULL); static DEVICE_ATTR(tlabels_free,S_IRUGO,fw_show_ne_tlabels_free,NULL);
static ssize_t fw_show_ne_tlabels_allocations(struct device *dev, struct device_attribute *attr, char *buf) static ssize_t fw_show_ne_tlabels_mask(struct device *dev,
struct device_attribute *attr, char *buf)
{ {
struct node_entry *ne = container_of(dev, struct node_entry, device); struct node_entry *ne = container_of(dev, struct node_entry, device);
return sprintf(buf, "%u\n", ne->tpool->allocations); unsigned long flags;
} unsigned long *tp = ne->host->tl_pool[NODEID_TO_NODE(ne->nodeid)].map;
static DEVICE_ATTR(tlabels_allocations,S_IRUGO,fw_show_ne_tlabels_allocations,NULL); u64 tm;
spin_lock_irqsave(&hpsb_tlabel_lock, flags);
static ssize_t fw_show_ne_tlabels_mask(struct device *dev, struct device_attribute *attr, char *buf)
{
struct node_entry *ne = container_of(dev, struct node_entry, device);
#if (BITS_PER_LONG <= 32) #if (BITS_PER_LONG <= 32)
return sprintf(buf, "0x%08lx%08lx\n", ne->tpool->pool[0], ne->tpool->pool[1]); tm = ((u64)tp[0] << 32) + tp[1];
#else #else
return sprintf(buf, "0x%016lx\n", ne->tpool->pool[0]); tm = tp[0];
#endif #endif
spin_unlock_irqrestore(&hpsb_tlabel_lock, flags);
return sprintf(buf, "0x%016llx\n", tm);
} }
static DEVICE_ATTR(tlabels_mask, S_IRUGO, fw_show_ne_tlabels_mask, NULL); static DEVICE_ATTR(tlabels_mask, S_IRUGO, fw_show_ne_tlabels_mask, NULL);
#endif /* HPSB_DEBUG_TLABELS */
static ssize_t fw_set_ignore_driver(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) static ssize_t fw_set_ignore_driver(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
@ -408,26 +411,11 @@ static ssize_t fw_get_destroy_node(struct bus_type *bus, char *buf)
} }
static BUS_ATTR(destroy_node, S_IWUSR | S_IRUGO, fw_get_destroy_node, fw_set_destroy_node); static BUS_ATTR(destroy_node, S_IWUSR | S_IRUGO, fw_get_destroy_node, fw_set_destroy_node);
static int nodemgr_rescan_bus_thread(void *__unused)
{
/* No userlevel access needed */
daemonize("kfwrescan");
bus_rescan_devices(&ieee1394_bus_type);
return 0;
}
static ssize_t fw_set_rescan(struct bus_type *bus, const char *buf, size_t count) static ssize_t fw_set_rescan(struct bus_type *bus, const char *buf, size_t count)
{ {
int state = simple_strtoul(buf, NULL, 10); if (simple_strtoul(buf, NULL, 10) == 1)
bus_rescan_devices(&ieee1394_bus_type);
/* Don't wait for this, or care about errors. Root could do
* something stupid and spawn this a lot of times, but that's
* root's fault. */
if (state == 1)
kernel_thread(nodemgr_rescan_bus_thread, NULL, CLONE_KERNEL);
return count; return count;
} }
static ssize_t fw_get_rescan(struct bus_type *bus, char *buf) static ssize_t fw_get_rescan(struct bus_type *bus, char *buf)
@ -483,9 +471,10 @@ static struct device_attribute *const fw_ne_attrs[] = {
&dev_attr_ne_vendor_id, &dev_attr_ne_vendor_id,
&dev_attr_ne_nodeid, &dev_attr_ne_nodeid,
&dev_attr_bus_options, &dev_attr_bus_options,
#ifdef HPSB_DEBUG_TLABELS
&dev_attr_tlabels_free, &dev_attr_tlabels_free,
&dev_attr_tlabels_allocations,
&dev_attr_tlabels_mask, &dev_attr_tlabels_mask,
#endif
}; };
@ -804,8 +793,6 @@ static struct node_entry *nodemgr_create_node(octlet_t guid, struct csr1212_csr
if (!ne) if (!ne)
return NULL; return NULL;
ne->tpool = &host->tpool[nodeid & NODE_MASK];
ne->host = host; ne->host = host;
ne->nodeid = nodeid; ne->nodeid = nodeid;
ne->generation = generation; ne->generation = generation;
@ -1251,6 +1238,7 @@ static void nodemgr_node_scan_one(struct host_info *hi,
octlet_t guid; octlet_t guid;
struct csr1212_csr *csr; struct csr1212_csr *csr;
struct nodemgr_csr_info *ci; struct nodemgr_csr_info *ci;
u8 *speed;
ci = kmalloc(sizeof(*ci), GFP_KERNEL); ci = kmalloc(sizeof(*ci), GFP_KERNEL);
if (!ci) if (!ci)
@ -1259,8 +1247,12 @@ static void nodemgr_node_scan_one(struct host_info *hi,
ci->host = host; ci->host = host;
ci->nodeid = nodeid; ci->nodeid = nodeid;
ci->generation = generation; ci->generation = generation;
ci->speed_unverified =
host->speed[NODEID_TO_NODE(nodeid)] > IEEE1394_SPEED_100; /* Prepare for speed probe which occurs when reading the ROM */
speed = &(host->speed[NODEID_TO_NODE(nodeid)]);
if (*speed > host->csr.lnk_spd)
*speed = host->csr.lnk_spd;
ci->speed_unverified = *speed > IEEE1394_SPEED_100;
/* We need to detect when the ConfigROM's generation has changed, /* We need to detect when the ConfigROM's generation has changed,
* so we only update the node's info when it needs to be. */ * so we only update the node's info when it needs to be. */
@ -1300,8 +1292,6 @@ static void nodemgr_node_scan_one(struct host_info *hi,
nodemgr_create_node(guid, csr, hi, nodeid, generation); nodemgr_create_node(guid, csr, hi, nodeid, generation);
else else
nodemgr_update_node(ne, csr, hi, nodeid, generation); nodemgr_update_node(ne, csr, hi, nodeid, generation);
return;
} }
@ -1326,6 +1316,7 @@ static void nodemgr_node_scan(struct host_info *hi, int generation)
} }
/* Caller needs to hold nodemgr_ud_class.subsys.rwsem as reader. */
static void nodemgr_suspend_ne(struct node_entry *ne) static void nodemgr_suspend_ne(struct node_entry *ne)
{ {
struct class_device *cdev; struct class_device *cdev;
@ -1361,6 +1352,7 @@ static void nodemgr_resume_ne(struct node_entry *ne)
ne->in_limbo = 0; ne->in_limbo = 0;
device_remove_file(&ne->device, &dev_attr_ne_in_limbo); device_remove_file(&ne->device, &dev_attr_ne_in_limbo);
down_read(&nodemgr_ud_class.subsys.rwsem);
down_read(&ne->device.bus->subsys.rwsem); down_read(&ne->device.bus->subsys.rwsem);
list_for_each_entry(cdev, &nodemgr_ud_class.children, node) { list_for_each_entry(cdev, &nodemgr_ud_class.children, node) {
ud = container_of(cdev, struct unit_directory, class_dev); ud = container_of(cdev, struct unit_directory, class_dev);
@ -1372,21 +1364,21 @@ static void nodemgr_resume_ne(struct node_entry *ne)
ud->device.driver->resume(&ud->device); ud->device.driver->resume(&ud->device);
} }
up_read(&ne->device.bus->subsys.rwsem); up_read(&ne->device.bus->subsys.rwsem);
up_read(&nodemgr_ud_class.subsys.rwsem);
HPSB_DEBUG("Node resumed: ID:BUS[" NODE_BUS_FMT "] GUID[%016Lx]", HPSB_DEBUG("Node resumed: ID:BUS[" NODE_BUS_FMT "] GUID[%016Lx]",
NODE_BUS_ARGS(ne->host, ne->nodeid), (unsigned long long)ne->guid); NODE_BUS_ARGS(ne->host, ne->nodeid), (unsigned long long)ne->guid);
} }
/* Caller needs to hold nodemgr_ud_class.subsys.rwsem as reader. */
static void nodemgr_update_pdrv(struct node_entry *ne) static void nodemgr_update_pdrv(struct node_entry *ne)
{ {
struct unit_directory *ud; struct unit_directory *ud;
struct hpsb_protocol_driver *pdrv; struct hpsb_protocol_driver *pdrv;
struct class *class = &nodemgr_ud_class;
struct class_device *cdev; struct class_device *cdev;
down_read(&class->subsys.rwsem); list_for_each_entry(cdev, &nodemgr_ud_class.children, node) {
list_for_each_entry(cdev, &class->children, node) {
ud = container_of(cdev, struct unit_directory, class_dev); ud = container_of(cdev, struct unit_directory, class_dev);
if (ud->ne != ne || !ud->device.driver) if (ud->ne != ne || !ud->device.driver)
continue; continue;
@ -1399,7 +1391,6 @@ static void nodemgr_update_pdrv(struct node_entry *ne)
up_write(&ud->device.bus->subsys.rwsem); up_write(&ud->device.bus->subsys.rwsem);
} }
} }
up_read(&class->subsys.rwsem);
} }
@ -1430,6 +1421,8 @@ static void nodemgr_irm_write_bc(struct node_entry *ne, int generation)
} }
/* Caller needs to hold nodemgr_ud_class.subsys.rwsem as reader because the
* calls to nodemgr_update_pdrv() and nodemgr_suspend_ne() here require it. */
static void nodemgr_probe_ne(struct host_info *hi, struct node_entry *ne, int generation) static void nodemgr_probe_ne(struct host_info *hi, struct node_entry *ne, int generation)
{ {
struct device *dev; struct device *dev;
@ -1492,9 +1485,8 @@ static void nodemgr_node_probe(struct host_info *hi, int generation)
/* If we had a bus reset while we were scanning the bus, it is /* If we had a bus reset while we were scanning the bus, it is
* possible that we did not probe all nodes. In that case, we * possible that we did not probe all nodes. In that case, we
* skip the clean up for now, since we could remove nodes that * skip the clean up for now, since we could remove nodes that
* were still on the bus. The bus reset increased hi->reset_sem, * were still on the bus. Another bus scan is pending which will
* so there's a bus scan pending which will do the clean up * do the clean up eventually.
* eventually.
* *
* Now let's tell the bus to rescan our devices. This may seem * Now let's tell the bus to rescan our devices. This may seem
* like overhead, but the driver-model core will only scan a * like overhead, but the driver-model core will only scan a
@ -1622,41 +1614,37 @@ static int nodemgr_host_thread(void *__hi)
{ {
struct host_info *hi = (struct host_info *)__hi; struct host_info *hi = (struct host_info *)__hi;
struct hpsb_host *host = hi->host; struct hpsb_host *host = hi->host;
int reset_cycles = 0; unsigned int g, generation = get_hpsb_generation(host) - 1;
int i, reset_cycles = 0;
/* No userlevel access needed */
daemonize(hi->daemon_name);
/* Setup our device-model entries */ /* Setup our device-model entries */
nodemgr_create_host_dev_files(host); nodemgr_create_host_dev_files(host);
/* Sit and wait for a signal to probe the nodes on the bus. This for (;;) {
* happens when we get a bus reset. */ /* Sleep until next bus reset */
while (1) { set_current_state(TASK_INTERRUPTIBLE);
unsigned int generation = 0; if (get_hpsb_generation(host) == generation)
int i; schedule();
__set_current_state(TASK_RUNNING);
if (down_interruptible(&hi->reset_sem) || /* Thread may have been woken up to freeze or to exit */
down_interruptible(&nodemgr_serialize)) { if (try_to_freeze())
continue;
if (kthread_should_stop())
goto exit;
if (mutex_lock_interruptible(&nodemgr_serialize)) {
if (try_to_freeze()) if (try_to_freeze())
continue; continue;
printk("NodeMgr: received unexpected signal?!\n" ); goto exit;
break;
}
if (hi->kill_me) {
up(&nodemgr_serialize);
break;
} }
/* Pause for 1/4 second in 1/16 second intervals, /* Pause for 1/4 second in 1/16 second intervals,
* to make sure things settle down. */ * to make sure things settle down. */
g = get_hpsb_generation(host);
for (i = 0; i < 4 ; i++) { for (i = 0; i < 4 ; i++) {
set_current_state(TASK_INTERRUPTIBLE); if (msleep_interruptible(63) || kthread_should_stop())
if (msleep_interruptible(63)) { goto unlock_exit;
up(&nodemgr_serialize);
goto caught_signal;
}
/* Now get the generation in which the node ID's we collect /* Now get the generation in which the node ID's we collect
* are valid. During the bus scan we will use this generation * are valid. During the bus scan we will use this generation
@ -1667,20 +1655,14 @@ static int nodemgr_host_thread(void *__hi)
/* If we get a reset before we are done waiting, then /* If we get a reset before we are done waiting, then
 * start the waiting over again */ * start the waiting over again */
while (!down_trylock(&hi->reset_sem)) if (generation != g)
i = 0; g = generation, i = 0;
/* Check the kill_me again */
if (hi->kill_me) {
up(&nodemgr_serialize);
goto caught_signal;
}
} }
if (!nodemgr_check_irm_capability(host, reset_cycles) || if (!nodemgr_check_irm_capability(host, reset_cycles) ||
!nodemgr_do_irm_duties(host, reset_cycles)) { !nodemgr_do_irm_duties(host, reset_cycles)) {
reset_cycles++; reset_cycles++;
up(&nodemgr_serialize); mutex_unlock(&nodemgr_serialize);
continue; continue;
} }
reset_cycles = 0; reset_cycles = 0;
@ -1698,13 +1680,13 @@ static int nodemgr_host_thread(void *__hi)
/* Update some of our sysfs symlinks */ /* Update some of our sysfs symlinks */
nodemgr_update_host_dev_links(host); nodemgr_update_host_dev_links(host);
up(&nodemgr_serialize); mutex_unlock(&nodemgr_serialize);
} }
unlock_exit:
caught_signal: mutex_unlock(&nodemgr_serialize);
exit:
HPSB_VERBOSE("NodeMgr: Exiting thread"); HPSB_VERBOSE("NodeMgr: Exiting thread");
return 0;
complete_and_exit(&hi->exited, 0);
} }
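The conversion above replaces the kernel_thread()/reset_sem pair with a standard kthread that parks itself and is kicked by wake_up_process() from nodemgr_host_reset(). A generic sketch of that lifecycle, with invented names (worker_fn, events) and none of the nodemgr specifics; the header providing try_to_freeze() varies by kernel version:

#include <linux/kthread.h>
#include <linux/sched.h>
#include <asm/atomic.h>

static int worker_fn(void *data)
{
	atomic_t *events = data;
	int seen = atomic_read(events);

	for (;;) {
		/* park until the event source bumps the counter and calls
		 * wake_up_process(), or until kthread_stop() */
		set_current_state(TASK_INTERRUPTIBLE);
		if (!kthread_should_stop() && atomic_read(events) == seen)
			schedule();
		__set_current_state(TASK_RUNNING);

		if (try_to_freeze())
			continue;
		if (kthread_should_stop())
			break;

		seen = atomic_read(events);
		/* ... handle the event (for nodemgr: settle, scan, probe) ... */
	}
	return 0;
}

/* creation, kick and teardown, mirroring nodemgr_add_host() and friends:
 *	task = kthread_run(worker_fn, &events, "sketch_thread");
 *	atomic_inc(&events); wake_up_process(task);
 *	kthread_stop(task);
 */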
int nodemgr_for_each_host(void *__data, int (*cb)(struct hpsb_host *, void *)) int nodemgr_for_each_host(void *__data, int (*cb)(struct hpsb_host *, void *))
@ -1764,41 +1746,27 @@ static void nodemgr_add_host(struct hpsb_host *host)
struct host_info *hi; struct host_info *hi;
hi = hpsb_create_hostinfo(&nodemgr_highlevel, host, sizeof(*hi)); hi = hpsb_create_hostinfo(&nodemgr_highlevel, host, sizeof(*hi));
if (!hi) { if (!hi) {
HPSB_ERR ("NodeMgr: out of memory in add host"); HPSB_ERR("NodeMgr: out of memory in add host");
return; return;
} }
hi->host = host; hi->host = host;
init_completion(&hi->exited); hi->thread = kthread_run(nodemgr_host_thread, hi, "knodemgrd_%d",
sema_init(&hi->reset_sem, 0); host->id);
if (IS_ERR(hi->thread)) {
sprintf(hi->daemon_name, "knodemgrd_%d", host->id); HPSB_ERR("NodeMgr: cannot start thread for host %d", host->id);
hi->pid = kernel_thread(nodemgr_host_thread, hi, CLONE_KERNEL);
if (hi->pid < 0) {
HPSB_ERR ("NodeMgr: failed to start %s thread for %s",
hi->daemon_name, host->driver->name);
hpsb_destroy_hostinfo(&nodemgr_highlevel, host); hpsb_destroy_hostinfo(&nodemgr_highlevel, host);
return;
} }
return;
} }
static void nodemgr_host_reset(struct hpsb_host *host) static void nodemgr_host_reset(struct hpsb_host *host)
{ {
struct host_info *hi = hpsb_get_hostinfo(&nodemgr_highlevel, host); struct host_info *hi = hpsb_get_hostinfo(&nodemgr_highlevel, host);
if (hi != NULL) { if (hi) {
HPSB_VERBOSE("NodeMgr: Processing host reset for %s", hi->daemon_name); HPSB_VERBOSE("NodeMgr: Processing reset for host %d", host->id);
up(&hi->reset_sem); wake_up_process(hi->thread);
} else }
HPSB_ERR ("NodeMgr: could not process reset of unused host");
return;
} }
static void nodemgr_remove_host(struct hpsb_host *host) static void nodemgr_remove_host(struct hpsb_host *host)
@ -1806,18 +1774,9 @@ static void nodemgr_remove_host(struct hpsb_host *host)
struct host_info *hi = hpsb_get_hostinfo(&nodemgr_highlevel, host); struct host_info *hi = hpsb_get_hostinfo(&nodemgr_highlevel, host);
if (hi) { if (hi) {
if (hi->pid >= 0) { kthread_stop(hi->thread);
hi->kill_me = 1; nodemgr_remove_host_dev(&host->device);
mb(); }
up(&hi->reset_sem);
wait_for_completion(&hi->exited);
nodemgr_remove_host_dev(&host->device);
}
} else
HPSB_ERR("NodeMgr: host %s does not exist, cannot remove",
host->driver->name);
return;
} }
static struct hpsb_highlevel nodemgr_highlevel = { static struct hpsb_highlevel nodemgr_highlevel = {

View file

@ -21,9 +21,15 @@
#define _IEEE1394_NODEMGR_H #define _IEEE1394_NODEMGR_H
#include <linux/device.h> #include <linux/device.h>
#include "csr1212.h" #include <asm/types.h>
#include "ieee1394_core.h" #include "ieee1394_core.h"
#include "ieee1394_hotplug.h" #include "ieee1394_types.h"
struct csr1212_csr;
struct csr1212_keyval;
struct hpsb_host;
struct ieee1394_device_id;
/* '1' '3' '9' '4' in ASCII */ /* '1' '3' '9' '4' in ASCII */
#define IEEE1394_BUSID_MAGIC __constant_cpu_to_be32(0x31333934) #define IEEE1394_BUSID_MAGIC __constant_cpu_to_be32(0x31333934)
@ -44,7 +50,6 @@ struct bus_options {
u16 max_rec; /* Maximum packet size node can receive */ u16 max_rec; /* Maximum packet size node can receive */
}; };
#define UNIT_DIRECTORY_VENDOR_ID 0x01 #define UNIT_DIRECTORY_VENDOR_ID 0x01
#define UNIT_DIRECTORY_MODEL_ID 0x02 #define UNIT_DIRECTORY_MODEL_ID 0x02
#define UNIT_DIRECTORY_SPECIFIER_ID 0x04 #define UNIT_DIRECTORY_SPECIFIER_ID 0x04
@ -59,8 +64,8 @@ struct bus_options {
* unit directory for each of these protocols. * unit directory for each of these protocols.
*/ */
struct unit_directory { struct unit_directory {
struct node_entry *ne; /* The node which this directory belongs to */ struct node_entry *ne; /* The node which this directory belongs to */
octlet_t address; /* Address of the unit directory on the node */ octlet_t address; /* Address of the unit directory on the node */
u8 flags; /* Indicates which entries were read */ u8 flags; /* Indicates which entries were read */
quadlet_t vendor_id; quadlet_t vendor_id;
@ -79,11 +84,10 @@ struct unit_directory {
int length; /* Number of quadlets */ int length; /* Number of quadlets */
struct device device; struct device device;
struct class_device class_dev; struct class_device class_dev;
struct csr1212_keyval *ud_kv; struct csr1212_keyval *ud_kv;
u32 lun; /* logical unit number immediate value */ u32 lun; /* logical unit number immediate value */
}; };
struct node_entry { struct node_entry {
@ -103,10 +107,8 @@ struct node_entry {
const char *vendor_oui; const char *vendor_oui;
u32 capabilities; u32 capabilities;
struct hpsb_tlabel_pool *tpool;
struct device device; struct device device;
struct class_device class_dev; struct class_device class_dev;
/* Means this node is not attached anymore */ /* Means this node is not attached anymore */
@ -153,8 +155,8 @@ static inline int hpsb_node_entry_valid(struct node_entry *ne)
/* /*
* This will fill in the given, pre-initialised hpsb_packet with the current * This will fill in the given, pre-initialised hpsb_packet with the current
* information from the node entry (host, node ID, generation number). It will * information from the node entry (host, node ID, generation number). It will
* return false if the node owning the GUID is not accessible (and not modify the * return false if the node owning the GUID is not accessible (and not modify
* hpsb_packet) and return true otherwise. * the hpsb_packet) and return true otherwise.
* *
* Note that packet sending may still fail in hpsb_send_packet if a bus reset * Note that packet sending may still fail in hpsb_send_packet if a bus reset
* happens while you are trying to set up the packet (due to obsolete generation * happens while you are trying to set up the packet (due to obsolete generation
@ -170,16 +172,13 @@ int hpsb_node_write(struct node_entry *ne, u64 addr,
int hpsb_node_lock(struct node_entry *ne, u64 addr, int hpsb_node_lock(struct node_entry *ne, u64 addr,
int extcode, quadlet_t *data, quadlet_t arg); int extcode, quadlet_t *data, quadlet_t arg);
/* Iterate the hosts, calling a given function with supplied data for each /* Iterate the hosts, calling a given function with supplied data for each
* host. */ * host. */
int nodemgr_for_each_host(void *__data, int (*cb)(struct hpsb_host *, void *)); int nodemgr_for_each_host(void *__data, int (*cb)(struct hpsb_host *, void *));
int init_ieee1394_nodemgr(void); int init_ieee1394_nodemgr(void);
void cleanup_ieee1394_nodemgr(void); void cleanup_ieee1394_nodemgr(void);
/* The template for a host device */ /* The template for a host device */
extern struct device nodemgr_dev_template_host; extern struct device nodemgr_dev_template_host;

View file

@ -136,7 +136,7 @@
#define DBGMSG(fmt, args...) \ #define DBGMSG(fmt, args...) \
printk(KERN_INFO "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args) printk(KERN_INFO "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
#else #else
#define DBGMSG(fmt, args...) #define DBGMSG(fmt, args...) do {} while (0)
#endif #endif
#ifdef CONFIG_IEEE1394_OHCI_DMA_DEBUG #ifdef CONFIG_IEEE1394_OHCI_DMA_DEBUG
@ -148,8 +148,8 @@ printk(KERN_INFO "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->
--global_outstanding_dmas, ## args) --global_outstanding_dmas, ## args)
static int global_outstanding_dmas = 0; static int global_outstanding_dmas = 0;
#else #else
#define OHCI_DMA_ALLOC(fmt, args...) #define OHCI_DMA_ALLOC(fmt, args...) do {} while (0)
#define OHCI_DMA_FREE(fmt, args...) #define OHCI_DMA_FREE(fmt, args...) do {} while (0)
#endif #endif
/* print general (card independent) information */ /* print general (card independent) information */
@ -181,36 +181,35 @@ static int alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
static void ohci1394_pci_remove(struct pci_dev *pdev); static void ohci1394_pci_remove(struct pci_dev *pdev);
#ifndef __LITTLE_ENDIAN #ifndef __LITTLE_ENDIAN
static unsigned hdr_sizes[] = const static size_t hdr_sizes[] = {
{
3, /* TCODE_WRITEQ */ 3, /* TCODE_WRITEQ */
4, /* TCODE_WRITEB */ 4, /* TCODE_WRITEB */
3, /* TCODE_WRITE_RESPONSE */ 3, /* TCODE_WRITE_RESPONSE */
0, /* ??? */ 0, /* reserved */
3, /* TCODE_READQ */ 3, /* TCODE_READQ */
4, /* TCODE_READB */ 4, /* TCODE_READB */
3, /* TCODE_READQ_RESPONSE */ 3, /* TCODE_READQ_RESPONSE */
4, /* TCODE_READB_RESPONSE */ 4, /* TCODE_READB_RESPONSE */
1, /* TCODE_CYCLE_START (???) */ 1, /* TCODE_CYCLE_START */
4, /* TCODE_LOCK_REQUEST */ 4, /* TCODE_LOCK_REQUEST */
2, /* TCODE_ISO_DATA */ 2, /* TCODE_ISO_DATA */
4, /* TCODE_LOCK_RESPONSE */ 4, /* TCODE_LOCK_RESPONSE */
/* rest is reserved or link-internal */
}; };
/* Swap headers */ static inline void header_le32_to_cpu(quadlet_t *data, unsigned char tcode)
static inline void packet_swab(quadlet_t *data, int tcode)
{ {
size_t size = hdr_sizes[tcode]; size_t size;
if (tcode > TCODE_LOCK_RESPONSE || hdr_sizes[tcode] == 0) if (unlikely(tcode >= ARRAY_SIZE(hdr_sizes)))
return; return;
size = hdr_sizes[tcode];
while (size--) while (size--)
data[size] = swab32(data[size]); data[size] = le32_to_cpu(data[size]);
} }
#else #else
/* Don't waste cycles on same sex byte swaps */ #define header_le32_to_cpu(w,x) do {} while (0)
#define packet_swab(w,x)
#endif /* !LITTLE_ENDIAN */ #endif /* !LITTLE_ENDIAN */
/*********************************** /***********************************
@ -701,7 +700,7 @@ static void insert_packet(struct ti_ohci *ohci,
d->prg_cpu[idx]->data[2] = packet->header[2]; d->prg_cpu[idx]->data[2] = packet->header[2];
d->prg_cpu[idx]->data[3] = packet->header[3]; d->prg_cpu[idx]->data[3] = packet->header[3];
} }
packet_swab(d->prg_cpu[idx]->data, packet->tcode); header_le32_to_cpu(d->prg_cpu[idx]->data, packet->tcode);
} }
if (packet->data_size) { /* block transmit */ if (packet->data_size) { /* block transmit */
@ -777,7 +776,7 @@ static void insert_packet(struct ti_ohci *ohci,
d->prg_cpu[idx]->data[0] = packet->speed_code<<16 | d->prg_cpu[idx]->data[0] = packet->speed_code<<16 |
(packet->header[0] & 0xFFFF); (packet->header[0] & 0xFFFF);
d->prg_cpu[idx]->data[1] = packet->header[0] & 0xFFFF0000; d->prg_cpu[idx]->data[1] = packet->header[0] & 0xFFFF0000;
packet_swab(d->prg_cpu[idx]->data, packet->tcode); header_le32_to_cpu(d->prg_cpu[idx]->data, packet->tcode);
d->prg_cpu[idx]->begin.control = d->prg_cpu[idx]->begin.control =
cpu_to_le32(DMA_CTL_OUTPUT_MORE | cpu_to_le32(DMA_CTL_OUTPUT_MORE |
@ -2598,8 +2597,9 @@ static const int TCODE_SIZE[16] = {20, 0, 16, -1, 16, 20, 20, 0,
* Determine the length of a packet in the buffer * Determine the length of a packet in the buffer
* Optimization suggested by Pascal Drolet <pascal.drolet@informission.ca> * Optimization suggested by Pascal Drolet <pascal.drolet@informission.ca>
*/ */
static __inline__ int packet_length(struct dma_rcv_ctx *d, int idx, quadlet_t *buf_ptr, static inline int packet_length(struct dma_rcv_ctx *d, int idx,
int offset, unsigned char tcode, int noswap) quadlet_t *buf_ptr, int offset,
unsigned char tcode, int noswap)
{ {
int length = -1; int length = -1;
@ -2730,7 +2730,7 @@ static void dma_rcv_tasklet (unsigned long data)
* bus reset. We always ignore it. */ * bus reset. We always ignore it. */
if (tcode != OHCI1394_TCODE_PHY) { if (tcode != OHCI1394_TCODE_PHY) {
if (!ohci->no_swap_incoming) if (!ohci->no_swap_incoming)
packet_swab(d->spb, tcode); header_le32_to_cpu(d->spb, tcode);
DBGMSG("Packet received from node" DBGMSG("Packet received from node"
" %d ack=0x%02X spd=%d tcode=0x%X" " %d ack=0x%02X spd=%d tcode=0x%X"
" length=%d ctx=%d tlabel=%d", " length=%d ctx=%d tlabel=%d",
@ -2738,7 +2738,7 @@ static void dma_rcv_tasklet (unsigned long data)
(cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>16)&0x1f, (cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>16)&0x1f,
(cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>21)&0x3, (cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>21)&0x3,
tcode, length, d->ctx, tcode, length, d->ctx,
(cond_le32_to_cpu(d->spb[0], ohci->no_swap_incoming)>>10)&0x3f); (d->spb[0]>>10)&0x3f);
ack = (((cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>16)&0x1f) ack = (((cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>16)&0x1f)
== 0x11) ? 1 : 0; == 0x11) ? 1 : 0;
@ -3529,9 +3529,10 @@ static void ohci1394_pci_remove(struct pci_dev *pdev)
put_device(dev); put_device(dev);
} }
#ifdef CONFIG_PM
static int ohci1394_pci_resume (struct pci_dev *pdev) static int ohci1394_pci_resume (struct pci_dev *pdev)
{ {
/* PowerMac resume code comes first */
#ifdef CONFIG_PPC_PMAC #ifdef CONFIG_PPC_PMAC
if (machine_is(powermac)) { if (machine_is(powermac)) {
struct device_node *of_node; struct device_node *of_node;
@ -3543,17 +3544,23 @@ static int ohci1394_pci_resume (struct pci_dev *pdev)
} }
#endif /* CONFIG_PPC_PMAC */ #endif /* CONFIG_PPC_PMAC */
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev); pci_restore_state(pdev);
pci_enable_device(pdev); return pci_enable_device(pdev);
return 0;
} }
static int ohci1394_pci_suspend (struct pci_dev *pdev, pm_message_t state) static int ohci1394_pci_suspend (struct pci_dev *pdev, pm_message_t state)
{ {
pci_save_state(pdev); int err;
err = pci_save_state(pdev);
if (err)
goto out;
err = pci_set_power_state(pdev, pci_choose_state(pdev, state));
if (err)
goto out;
/* PowerMac suspend code comes last */
#ifdef CONFIG_PPC_PMAC #ifdef CONFIG_PPC_PMAC
if (machine_is(powermac)) { if (machine_is(powermac)) {
struct device_node *of_node; struct device_node *of_node;
@ -3563,11 +3570,11 @@ static int ohci1394_pci_suspend (struct pci_dev *pdev, pm_message_t state)
if (of_node) if (of_node)
pmac_call_feature(PMAC_FTR_1394_ENABLE, of_node, 0, 0); pmac_call_feature(PMAC_FTR_1394_ENABLE, of_node, 0, 0);
} }
#endif #endif /* CONFIG_PPC_PMAC */
out:
return 0; return err;
} }
#endif /* CONFIG_PM */
#define PCI_CLASS_FIREWIRE_OHCI ((PCI_CLASS_SERIAL_FIREWIRE << 8) | 0x10) #define PCI_CLASS_FIREWIRE_OHCI ((PCI_CLASS_SERIAL_FIREWIRE << 8) | 0x10)
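Taken together, the power-management changes make both callbacks report failures instead of pretending success: suspend now propagates errors from pci_save_state() and pci_set_power_state(pci_choose_state(...)), resume brings the device back to D0, restores config space and returns the result of pci_enable_device(), and the whole pair (plus the .suspend/.resume wiring in the next hunk) only exists when CONFIG_PM is set. A condensed, hedged sketch of that shape with the PowerMac platform hooks left out; the foo_ names are illustrative, not the driver's:

#include <linux/pci.h>

static int foo_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
        int err;

        err = pci_save_state(pdev);
        if (err)
                return err;
        return pci_set_power_state(pdev, pci_choose_state(pdev, state));
}

static int foo_pci_resume(struct pci_dev *pdev)
{
        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);
        return pci_enable_device(pdev);
}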
@ -3590,8 +3597,10 @@ static struct pci_driver ohci1394_pci_driver = {
.id_table = ohci1394_pci_tbl, .id_table = ohci1394_pci_tbl,
.probe = ohci1394_pci_probe, .probe = ohci1394_pci_probe,
.remove = ohci1394_pci_remove, .remove = ohci1394_pci_remove,
#ifdef CONFIG_PM
.resume = ohci1394_pci_resume, .resume = ohci1394_pci_resume,
.suspend = ohci1394_pci_suspend, .suspend = ohci1394_pci_suspend,
#endif
}; };
/*********************************** /***********************************
@ -3718,5 +3727,7 @@ static int __init ohci1394_init(void)
return pci_register_driver(&ohci1394_pci_driver); return pci_register_driver(&ohci1394_pci_driver);
} }
module_init(ohci1394_init); /* Register before most other device drivers.
* Useful for remote debugging via physical DMA, e.g. using firescope. */
fs_initcall(ohci1394_init);
module_exit(ohci1394_cleanup); module_exit(ohci1394_cleanup);
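Switching the registration from module_init() to fs_initcall() only changes behaviour when the driver is built into the kernel: initcalls run in level order (core, postcore, arch, subsys, fs, device, late), so an fs-level initcall runs before the bulk of device drivers, which is what makes early remote debugging over physical DMA possible. Built as a module, the ordering distinction disappears and loading works as before. A hedged sketch of the shape (the foo_* names and the message are made up):

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>

static int __init foo_init(void)
{
        printk(KERN_INFO "foo: registered at fs initcall level\n");
        return 0;
}

static void __exit foo_exit(void)
{
}

/* Runs before device_initcall()/module_init() drivers when built in. */
fs_initcall(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");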

View file

@ -29,9 +29,8 @@ struct file_info {
struct list_head req_pending; struct list_head req_pending;
struct list_head req_complete; struct list_head req_complete;
struct semaphore complete_sem;
spinlock_t reqlists_lock; spinlock_t reqlists_lock;
wait_queue_head_t poll_wait_complete; wait_queue_head_t wait_complete;
struct list_head addr_list; struct list_head addr_list;

View file

@ -44,14 +44,15 @@
#include <linux/compat.h> #include <linux/compat.h>
#include "csr1212.h" #include "csr1212.h"
#include "ieee1394.h"
#include "ieee1394_types.h"
#include "ieee1394_core.h"
#include "nodemgr.h"
#include "hosts.h"
#include "highlevel.h" #include "highlevel.h"
#include "iso.h" #include "hosts.h"
#include "ieee1394.h"
#include "ieee1394_core.h"
#include "ieee1394_hotplug.h"
#include "ieee1394_transactions.h" #include "ieee1394_transactions.h"
#include "ieee1394_types.h"
#include "iso.h"
#include "nodemgr.h"
#include "raw1394.h" #include "raw1394.h"
#include "raw1394-private.h" #include "raw1394-private.h"
@ -66,7 +67,7 @@
#define DBGMSG(fmt, args...) \ #define DBGMSG(fmt, args...) \
printk(KERN_INFO "raw1394:" fmt "\n" , ## args) printk(KERN_INFO "raw1394:" fmt "\n" , ## args)
#else #else
#define DBGMSG(fmt, args...) #define DBGMSG(fmt, args...) do {} while (0)
#endif #endif
static LIST_HEAD(host_info_list); static LIST_HEAD(host_info_list);
@ -132,10 +133,9 @@ static void free_pending_request(struct pending_request *req)
static void __queue_complete_req(struct pending_request *req) static void __queue_complete_req(struct pending_request *req)
{ {
struct file_info *fi = req->file_info; struct file_info *fi = req->file_info;
list_move_tail(&req->list, &fi->req_complete);
up(&fi->complete_sem); list_move_tail(&req->list, &fi->req_complete);
wake_up_interruptible(&fi->poll_wait_complete); wake_up(&fi->wait_complete);
} }
static void queue_complete_req(struct pending_request *req) static void queue_complete_req(struct pending_request *req)
@ -463,13 +463,36 @@ raw1394_compat_read(const char __user *buf, struct raw1394_request *r)
#endif #endif
/* get next completed request (caller must hold fi->reqlists_lock) */
static inline struct pending_request *__next_complete_req(struct file_info *fi)
{
struct list_head *lh;
struct pending_request *req = NULL;
if (!list_empty(&fi->req_complete)) {
lh = fi->req_complete.next;
list_del(lh);
req = list_entry(lh, struct pending_request, list);
}
return req;
}
/* atomically get next completed request */
static struct pending_request *next_complete_req(struct file_info *fi)
{
unsigned long flags;
struct pending_request *req;
spin_lock_irqsave(&fi->reqlists_lock, flags);
req = __next_complete_req(fi);
spin_unlock_irqrestore(&fi->reqlists_lock, flags);
return req;
}
static ssize_t raw1394_read(struct file *file, char __user * buffer, static ssize_t raw1394_read(struct file *file, char __user * buffer,
size_t count, loff_t * offset_is_ignored) size_t count, loff_t * offset_is_ignored)
{ {
unsigned long flags;
struct file_info *fi = (struct file_info *)file->private_data; struct file_info *fi = (struct file_info *)file->private_data;
struct list_head *lh;
struct pending_request *req; struct pending_request *req;
ssize_t ret; ssize_t ret;
@ -487,22 +510,21 @@ static ssize_t raw1394_read(struct file *file, char __user * buffer,
} }
if (file->f_flags & O_NONBLOCK) { if (file->f_flags & O_NONBLOCK) {
if (down_trylock(&fi->complete_sem)) { if (!(req = next_complete_req(fi)))
return -EAGAIN; return -EAGAIN;
}
} else { } else {
if (down_interruptible(&fi->complete_sem)) { /*
* NB: We call the macro wait_event_interruptible() with a
* condition argument with side effect. This is only possible
* because the side effect does not occur until the condition
* became true, and wait_event_interruptible() won't evaluate
* the condition again after that.
*/
if (wait_event_interruptible(fi->wait_complete,
(req = next_complete_req(fi))))
return -ERESTARTSYS; return -ERESTARTSYS;
}
} }
spin_lock_irqsave(&fi->reqlists_lock, flags);
lh = fi->req_complete.next;
list_del(lh);
spin_unlock_irqrestore(&fi->reqlists_lock, flags);
req = list_entry(lh, struct pending_request, list);
if (req->req.length) { if (req->req.length) {
if (copy_to_user(int2ptr(req->req.recvb), req->data, if (copy_to_user(int2ptr(req->req.recvb), req->data,
req->req.length)) { req->req.length)) {
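The read path used to take a counting semaphore and then pop the completion list under the spinlock in a second step; __next_complete_req() and next_complete_req() fold both into one atomic dequeue, so the non-blocking and blocking branches now differ only in whether they wait. The comment about the condition with a side effect is worth spelling out: wait_event_interruptible() evaluates its condition before sleeping and after every wakeup, and the assignment to req only happens at the evaluation that turns out true, so a dequeued request can never be lost to a signal. A generic, hedged sketch of the same consumer pattern (struct item and the helper names are illustrative, not raw1394's):

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

struct item {
        struct list_head list;
};

static LIST_HEAD(complete_list);
static DEFINE_SPINLOCK(list_lock);
static DECLARE_WAIT_QUEUE_HEAD(complete_wait);

/* Atomically take the oldest completed item, or NULL if there is none. */
static struct item *next_complete(void)
{
        struct item *it = NULL;
        unsigned long flags;

        spin_lock_irqsave(&list_lock, flags);
        if (!list_empty(&complete_list)) {
                it = list_entry(complete_list.next, struct item, list);
                list_del(&it->list);
        }
        spin_unlock_irqrestore(&list_lock, flags);
        return it;
}

/* Blocking consumer: the dequeue itself is the wait condition. */
static struct item *get_item(void)
{
        struct item *it;

        if (wait_event_interruptible(complete_wait, (it = next_complete())))
                return ERR_PTR(-ERESTARTSYS);
        return it;
}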
@ -1752,6 +1774,7 @@ static int arm_register(struct file_info *fi, struct pending_request *req)
addr->notification_options |= addr->client_transactions; addr->notification_options |= addr->client_transactions;
addr->recvb = req->req.recvb; addr->recvb = req->req.recvb;
addr->rec_length = (u16) ((req->req.misc >> 16) & 0xFFFF); addr->rec_length = (u16) ((req->req.misc >> 16) & 0xFFFF);
spin_lock_irqsave(&host_info_lock, flags); spin_lock_irqsave(&host_info_lock, flags);
hi = find_host_info(fi->host); hi = find_host_info(fi->host);
same_host = 0; same_host = 0;
@ -1777,9 +1800,9 @@ static int arm_register(struct file_info *fi, struct pending_request *req)
} }
if (same_host) { if (same_host) {
/* addressrange occupied by same host */ /* addressrange occupied by same host */
spin_unlock_irqrestore(&host_info_lock, flags);
vfree(addr->addr_space_buffer); vfree(addr->addr_space_buffer);
kfree(addr); kfree(addr);
spin_unlock_irqrestore(&host_info_lock, flags);
return (-EALREADY); return (-EALREADY);
} }
/* another host with valid address-entry containing same addressrange */ /* another host with valid address-entry containing same addressrange */
@ -1807,6 +1830,8 @@ static int arm_register(struct file_info *fi, struct pending_request *req)
} }
} }
} }
spin_unlock_irqrestore(&host_info_lock, flags);
if (another_host) { if (another_host) {
DBGMSG("another hosts entry is valid -> SUCCESS"); DBGMSG("another hosts entry is valid -> SUCCESS");
if (copy_to_user(int2ptr(req->req.recvb), if (copy_to_user(int2ptr(req->req.recvb),
@ -1815,11 +1840,11 @@ static int arm_register(struct file_info *fi, struct pending_request *req)
" address-range-entry is invalid -> EFAULT !!!\n"); " address-range-entry is invalid -> EFAULT !!!\n");
vfree(addr->addr_space_buffer); vfree(addr->addr_space_buffer);
kfree(addr); kfree(addr);
spin_unlock_irqrestore(&host_info_lock, flags);
return (-EFAULT); return (-EFAULT);
} }
free_pending_request(req); /* immediate success or fail */ free_pending_request(req); /* immediate success or fail */
/* INSERT ENTRY */ /* INSERT ENTRY */
spin_lock_irqsave(&host_info_lock, flags);
list_add_tail(&addr->addr_list, &fi->addr_list); list_add_tail(&addr->addr_list, &fi->addr_list);
spin_unlock_irqrestore(&host_info_lock, flags); spin_unlock_irqrestore(&host_info_lock, flags);
return sizeof(struct raw1394_request); return sizeof(struct raw1394_request);
@ -1830,15 +1855,15 @@ static int arm_register(struct file_info *fi, struct pending_request *req)
req->req.address + req->req.length); req->req.address + req->req.length);
if (retval) { if (retval) {
/* INSERT ENTRY */ /* INSERT ENTRY */
spin_lock_irqsave(&host_info_lock, flags);
list_add_tail(&addr->addr_list, &fi->addr_list); list_add_tail(&addr->addr_list, &fi->addr_list);
spin_unlock_irqrestore(&host_info_lock, flags);
} else { } else {
DBGMSG("arm_register failed errno: %d \n", retval); DBGMSG("arm_register failed errno: %d \n", retval);
vfree(addr->addr_space_buffer); vfree(addr->addr_space_buffer);
kfree(addr); kfree(addr);
spin_unlock_irqrestore(&host_info_lock, flags);
return (-EALREADY); return (-EALREADY);
} }
spin_unlock_irqrestore(&host_info_lock, flags);
free_pending_request(req); /* immediate success or fail */ free_pending_request(req); /* immediate success or fail */
return sizeof(struct raw1394_request); return sizeof(struct raw1394_request);
} }
@ -1904,10 +1929,10 @@ static int arm_unregister(struct file_info *fi, struct pending_request *req)
if (another_host) { if (another_host) {
DBGMSG("delete entry from list -> success"); DBGMSG("delete entry from list -> success");
list_del(&addr->addr_list); list_del(&addr->addr_list);
spin_unlock_irqrestore(&host_info_lock, flags);
vfree(addr->addr_space_buffer); vfree(addr->addr_space_buffer);
kfree(addr); kfree(addr);
free_pending_request(req); /* immediate success or fail */ free_pending_request(req); /* immediate success or fail */
spin_unlock_irqrestore(&host_info_lock, flags);
return sizeof(struct raw1394_request); return sizeof(struct raw1394_request);
} }
retval = retval =
@ -1949,23 +1974,19 @@ static int arm_get_buf(struct file_info *fi, struct pending_request *req)
(arm_addr->end > req->req.address)) { (arm_addr->end > req->req.address)) {
if (req->req.address + req->req.length <= arm_addr->end) { if (req->req.address + req->req.length <= arm_addr->end) {
offset = req->req.address - arm_addr->start; offset = req->req.address - arm_addr->start;
spin_unlock_irqrestore(&host_info_lock, flags);
DBGMSG DBGMSG
("arm_get_buf copy_to_user( %08X, %p, %u )", ("arm_get_buf copy_to_user( %08X, %p, %u )",
(u32) req->req.recvb, (u32) req->req.recvb,
arm_addr->addr_space_buffer + offset, arm_addr->addr_space_buffer + offset,
(u32) req->req.length); (u32) req->req.length);
if (copy_to_user if (copy_to_user
(int2ptr(req->req.recvb), (int2ptr(req->req.recvb),
arm_addr->addr_space_buffer + offset, arm_addr->addr_space_buffer + offset,
req->req.length)) { req->req.length))
spin_unlock_irqrestore(&host_info_lock,
flags);
return (-EFAULT); return (-EFAULT);
}
spin_unlock_irqrestore(&host_info_lock, flags);
/* We have to free the request, because we /* We have to free the request, because we
* queue no response, and therefore nobody * queue no response, and therefore nobody
* will free it. */ * will free it. */
@ -2005,24 +2026,23 @@ static int arm_set_buf(struct file_info *fi, struct pending_request *req)
(arm_addr->end > req->req.address)) { (arm_addr->end > req->req.address)) {
if (req->req.address + req->req.length <= arm_addr->end) { if (req->req.address + req->req.length <= arm_addr->end) {
offset = req->req.address - arm_addr->start; offset = req->req.address - arm_addr->start;
spin_unlock_irqrestore(&host_info_lock, flags);
DBGMSG DBGMSG
("arm_set_buf copy_from_user( %p, %08X, %u )", ("arm_set_buf copy_from_user( %p, %08X, %u )",
arm_addr->addr_space_buffer + offset, arm_addr->addr_space_buffer + offset,
(u32) req->req.sendb, (u32) req->req.sendb,
(u32) req->req.length); (u32) req->req.length);
if (copy_from_user if (copy_from_user
(arm_addr->addr_space_buffer + offset, (arm_addr->addr_space_buffer + offset,
int2ptr(req->req.sendb), int2ptr(req->req.sendb),
req->req.length)) { req->req.length))
spin_unlock_irqrestore(&host_info_lock,
flags);
return (-EFAULT); return (-EFAULT);
}
spin_unlock_irqrestore(&host_info_lock, flags); /* We have to free the request, because we
free_pending_request(req); /* we have to free the request, because we queue no response, and therefore nobody will free it */ * queue no response, and therefore nobody
* will free it. */
free_pending_request(req);
return sizeof(struct raw1394_request); return sizeof(struct raw1394_request);
} else { } else {
DBGMSG("arm_set_buf request exceeded mapping"); DBGMSG("arm_set_buf request exceeded mapping");
@ -2744,7 +2764,7 @@ static unsigned int raw1394_poll(struct file *file, poll_table * pt)
unsigned int mask = POLLOUT | POLLWRNORM; unsigned int mask = POLLOUT | POLLWRNORM;
unsigned long flags; unsigned long flags;
poll_wait(file, &fi->poll_wait_complete, pt); poll_wait(file, &fi->wait_complete, pt);
spin_lock_irqsave(&fi->reqlists_lock, flags); spin_lock_irqsave(&fi->reqlists_lock, flags);
if (!list_empty(&fi->req_complete)) { if (!list_empty(&fi->req_complete)) {
@ -2769,9 +2789,8 @@ static int raw1394_open(struct inode *inode, struct file *file)
fi->state = opened; fi->state = opened;
INIT_LIST_HEAD(&fi->req_pending); INIT_LIST_HEAD(&fi->req_pending);
INIT_LIST_HEAD(&fi->req_complete); INIT_LIST_HEAD(&fi->req_complete);
sema_init(&fi->complete_sem, 0);
spin_lock_init(&fi->reqlists_lock); spin_lock_init(&fi->reqlists_lock);
init_waitqueue_head(&fi->poll_wait_complete); init_waitqueue_head(&fi->wait_complete);
INIT_LIST_HEAD(&fi->addr_list); INIT_LIST_HEAD(&fi->addr_list);
file->private_data = fi; file->private_data = fi;
@ -2784,7 +2803,7 @@ static int raw1394_release(struct inode *inode, struct file *file)
struct file_info *fi = file->private_data; struct file_info *fi = file->private_data;
struct list_head *lh; struct list_head *lh;
struct pending_request *req; struct pending_request *req;
int done = 0, i, fail = 0; int i, fail;
int retval = 0; int retval = 0;
struct list_head *entry; struct list_head *entry;
struct arm_addr *addr = NULL; struct arm_addr *addr = NULL;
@ -2864,25 +2883,28 @@ static int raw1394_release(struct inode *inode, struct file *file)
"error(s) occurred \n"); "error(s) occurred \n");
} }
while (!done) { for (;;) {
/* This locked section guarantees that neither
* complete nor pending requests exist once i!=0 */
spin_lock_irqsave(&fi->reqlists_lock, flags); spin_lock_irqsave(&fi->reqlists_lock, flags);
while ((req = __next_complete_req(fi)))
while (!list_empty(&fi->req_complete)) {
lh = fi->req_complete.next;
list_del(lh);
req = list_entry(lh, struct pending_request, list);
free_pending_request(req); free_pending_request(req);
}
if (list_empty(&fi->req_pending))
done = 1;
i = list_empty(&fi->req_pending);
spin_unlock_irqrestore(&fi->reqlists_lock, flags); spin_unlock_irqrestore(&fi->reqlists_lock, flags);
if (!done) if (i)
down_interruptible(&fi->complete_sem); break;
/*
* Sleep until more requests can be freed.
*
* NB: We call the macro wait_event() with a condition argument
* with side effect. This is only possible because the side
* effect does not occur until the condition became true, and
* wait_event() won't evaluate the condition again after that.
*/
wait_event(fi->wait_complete, (req = next_complete_req(fi)));
free_pending_request(req);
} }
/* Remove any sub-trees left by user space programs */ /* Remove any sub-trees left by user space programs */

View file

@ -38,31 +38,36 @@
* but the code needs additional debugging. * but the code needs additional debugging.
*/ */
#include <linux/blkdev.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/list.h> #include <linux/list.h>
#include <linux/string.h>
#include <linux/stringify.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/moduleparam.h> #include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/blkdev.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/pci.h> #include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/stringify.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <asm/current.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/byteorder.h> #include <asm/byteorder.h>
#include <asm/atomic.h> #include <asm/errno.h>
#include <asm/system.h> #include <asm/param.h>
#include <asm/scatterlist.h> #include <asm/scatterlist.h>
#include <asm/system.h>
#include <asm/types.h>
#ifdef CONFIG_IEEE1394_SBP2_PHYS_DMA
#include <asm/io.h> /* for bus_to_virt */
#endif
#include <scsi/scsi.h> #include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h> #include <scsi/scsi_cmnd.h>
@ -71,13 +76,14 @@
#include <scsi/scsi_host.h> #include <scsi/scsi_host.h>
#include "csr1212.h" #include "csr1212.h"
#include "ieee1394.h"
#include "ieee1394_types.h"
#include "ieee1394_core.h"
#include "nodemgr.h"
#include "hosts.h"
#include "highlevel.h" #include "highlevel.h"
#include "hosts.h"
#include "ieee1394.h"
#include "ieee1394_core.h"
#include "ieee1394_hotplug.h"
#include "ieee1394_transactions.h" #include "ieee1394_transactions.h"
#include "ieee1394_types.h"
#include "nodemgr.h"
#include "sbp2.h" #include "sbp2.h"
/* /*
@ -173,11 +179,6 @@ MODULE_PARM_DESC(workarounds, "Work around device bugs (default = 0"
", override internal blacklist = " __stringify(SBP2_WORKAROUND_OVERRIDE) ", override internal blacklist = " __stringify(SBP2_WORKAROUND_OVERRIDE)
", or a combination)"); ", or a combination)");
/* legacy parameter */
static int force_inquiry_hack;
module_param(force_inquiry_hack, int, 0644);
MODULE_PARM_DESC(force_inquiry_hack, "Deprecated, use 'workarounds'");
/* /*
* Export information about protocols/devices supported by this driver. * Export information about protocols/devices supported by this driver.
*/ */
@ -208,9 +209,9 @@ static u32 global_outstanding_command_orbs = 0;
#define outstanding_orb_incr global_outstanding_command_orbs++ #define outstanding_orb_incr global_outstanding_command_orbs++
#define outstanding_orb_decr global_outstanding_command_orbs-- #define outstanding_orb_decr global_outstanding_command_orbs--
#else #else
#define SBP2_ORB_DEBUG(fmt, args...) #define SBP2_ORB_DEBUG(fmt, args...) do {} while (0)
#define outstanding_orb_incr #define outstanding_orb_incr do {} while (0)
#define outstanding_orb_decr #define outstanding_orb_decr do {} while (0)
#endif #endif
#ifdef CONFIG_IEEE1394_SBP2_DEBUG_DMA #ifdef CONFIG_IEEE1394_SBP2_DEBUG_DMA
@ -222,8 +223,8 @@ static u32 global_outstanding_command_orbs = 0;
--global_outstanding_dmas, ## args) --global_outstanding_dmas, ## args)
static u32 global_outstanding_dmas = 0; static u32 global_outstanding_dmas = 0;
#else #else
#define SBP2_DMA_ALLOC(fmt, args...) #define SBP2_DMA_ALLOC(fmt, args...) do {} while (0)
#define SBP2_DMA_FREE(fmt, args...) #define SBP2_DMA_FREE(fmt, args...) do {} while (0)
#endif #endif
#if CONFIG_IEEE1394_SBP2_DEBUG >= 2 #if CONFIG_IEEE1394_SBP2_DEBUG >= 2
@ -237,7 +238,7 @@ static u32 global_outstanding_dmas = 0;
#define SBP2_NOTICE(fmt, args...) HPSB_NOTICE("sbp2: "fmt, ## args) #define SBP2_NOTICE(fmt, args...) HPSB_NOTICE("sbp2: "fmt, ## args)
#define SBP2_WARN(fmt, args...) HPSB_WARN("sbp2: "fmt, ## args) #define SBP2_WARN(fmt, args...) HPSB_WARN("sbp2: "fmt, ## args)
#else #else
#define SBP2_DEBUG(fmt, args...) #define SBP2_DEBUG(fmt, args...) do {} while (0)
#define SBP2_INFO(fmt, args...) HPSB_INFO("sbp2: "fmt, ## args) #define SBP2_INFO(fmt, args...) HPSB_INFO("sbp2: "fmt, ## args)
#define SBP2_NOTICE(fmt, args...) HPSB_NOTICE("sbp2: "fmt, ## args) #define SBP2_NOTICE(fmt, args...) HPSB_NOTICE("sbp2: "fmt, ## args)
#define SBP2_WARN(fmt, args...) HPSB_WARN("sbp2: "fmt, ## args) #define SBP2_WARN(fmt, args...) HPSB_WARN("sbp2: "fmt, ## args)
@ -356,7 +357,7 @@ static const struct {
/* /*
* Converts a buffer from be32 to cpu byte ordering. Length is in bytes. * Converts a buffer from be32 to cpu byte ordering. Length is in bytes.
*/ */
static __inline__ void sbp2util_be32_to_cpu_buffer(void *buffer, int length) static inline void sbp2util_be32_to_cpu_buffer(void *buffer, int length)
{ {
u32 *temp = buffer; u32 *temp = buffer;
@ -369,7 +370,7 @@ static __inline__ void sbp2util_be32_to_cpu_buffer(void *buffer, int length)
/* /*
* Converts a buffer from cpu to be32 byte ordering. Length is in bytes. * Converts a buffer from cpu to be32 byte ordering. Length is in bytes.
*/ */
static __inline__ void sbp2util_cpu_to_be32_buffer(void *buffer, int length) static inline void sbp2util_cpu_to_be32_buffer(void *buffer, int length)
{ {
u32 *temp = buffer; u32 *temp = buffer;
@ -380,8 +381,8 @@ static __inline__ void sbp2util_cpu_to_be32_buffer(void *buffer, int length)
} }
#else /* BIG_ENDIAN */ #else /* BIG_ENDIAN */
/* Why waste the cpu cycles? */ /* Why waste the cpu cycles? */
#define sbp2util_be32_to_cpu_buffer(x,y) #define sbp2util_be32_to_cpu_buffer(x,y) do {} while (0)
#define sbp2util_cpu_to_be32_buffer(x,y) #define sbp2util_cpu_to_be32_buffer(x,y) do {} while (0)
#endif #endif
#ifdef CONFIG_IEEE1394_SBP2_PACKET_DUMP #ifdef CONFIG_IEEE1394_SBP2_PACKET_DUMP
@ -417,24 +418,26 @@ static void sbp2util_packet_dump(void *buffer, int length, char *dump_name,
return; return;
} }
#else #else
#define sbp2util_packet_dump(w,x,y,z) #define sbp2util_packet_dump(w,x,y,z) do {} while (0)
#endif #endif
/* static DECLARE_WAIT_QUEUE_HEAD(access_wq);
* Goofy routine that basically does a down_timeout function.
*/
static int sbp2util_down_timeout(atomic_t *done, int timeout)
{
int i;
for (i = timeout; (i > 0 && atomic_read(done) == 0); i-= HZ/10) { /*
if (msleep_interruptible(100)) /* 100ms */ * Waits for completion of an SBP-2 access request.
return 1; * Returns nonzero if timed out or prematurely interrupted.
} */
return (i > 0) ? 0 : 1; static int sbp2util_access_timeout(struct scsi_id_instance_data *scsi_id,
int timeout)
{
long leftover = wait_event_interruptible_timeout(
access_wq, scsi_id->access_complete, timeout);
scsi_id->access_complete = 0;
return leftover <= 0;
} }
/* Free's an allocated packet */ /* Frees an allocated packet */
static void sbp2_free_packet(struct hpsb_packet *packet) static void sbp2_free_packet(struct hpsb_packet *packet)
{ {
hpsb_free_tlabel(packet); hpsb_free_tlabel(packet);
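sbp2util_down_timeout() approximated a timed wait by polling an atomic flag in 100 ms msleep_interruptible() steps; sbp2util_access_timeout() replaces that with a real sleep on access_wq via wait_event_interruptible_timeout(), woken once the status write for the management ORB arrives (handled further down in this file), and it clears access_complete so the next login/logout/reconnect starts from a clean slate. A hedged sketch of the handshake, using a single flag instead of the per-unit field:

#include <linux/sched.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(access_wq);
static int access_complete;

/* Waiter: returns nonzero on timeout or signal, zero once completion arrived. */
static int wait_for_access(long timeout)
{
        long left = wait_event_interruptible_timeout(access_wq,
                                                     access_complete, timeout);

        access_complete = 0;
        return left <= 0;
}

/* Completion side, e.g. called from the status write handler. */
static void access_done(void)
{
        access_complete = 1;
        wake_up_interruptible(&access_wq);
}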
@ -468,6 +471,44 @@ static int sbp2util_node_write_no_wait(struct node_entry *ne, u64 addr,
return 0; return 0;
} }
static void sbp2util_notify_fetch_agent(struct scsi_id_instance_data *scsi_id,
u64 offset, quadlet_t *data, size_t len)
{
/*
* There is a small window after a bus reset within which the node
* entry's generation is current but the reconnect wasn't completed.
*/
if (unlikely(atomic_read(&scsi_id->state) == SBP2LU_STATE_IN_RESET))
return;
if (hpsb_node_write(scsi_id->ne,
scsi_id->sbp2_command_block_agent_addr + offset,
data, len))
SBP2_ERR("sbp2util_notify_fetch_agent failed.");
/*
* Now accept new SCSI commands, unless a bus reset happened during
* hpsb_node_write.
*/
if (likely(atomic_read(&scsi_id->state) != SBP2LU_STATE_IN_RESET))
scsi_unblock_requests(scsi_id->scsi_host);
}
static void sbp2util_write_orb_pointer(void *p)
{
quadlet_t data[2];
data[0] = ORB_SET_NODE_ID(
((struct scsi_id_instance_data *)p)->hi->host->node_id);
data[1] = ((struct scsi_id_instance_data *)p)->last_orb_dma;
sbp2util_cpu_to_be32_buffer(data, 8);
sbp2util_notify_fetch_agent(p, SBP2_ORB_POINTER_OFFSET, data, 8);
}
static void sbp2util_write_doorbell(void *p)
{
sbp2util_notify_fetch_agent(p, SBP2_DOORBELL_OFFSET, NULL, 4);
}
/* /*
* This function is called to create a pool of command orbs used for * This function is called to create a pool of command orbs used for
* command processing. It is called when a new sbp2 device is detected. * command processing. It is called when a new sbp2 device is detected.
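sbp2util_write_orb_pointer() and sbp2util_write_doorbell() exist for the case where the fast, non-blocking register write in the command path fails, typically because no transaction labels are free in atomic context. The ORB dispatch code then blocks further SCSI commands, points the per-unit protocol_work at the right helper and schedules it; the helper performs a sleeping hpsb_node_write() and unblocks requests unless a bus reset intervened. Note that this uses the three-argument work API of this kernel generation, where the work function receives a void * argument; later kernels changed those signatures. A hedged sketch of the deferral (struct lu and the names are illustrative):

#include <linux/workqueue.h>

struct lu {
        struct work_struct protocol_work;
        /* ... device state ... */
};

/* Runs in process context, so it may sleep while waiting for a tlabel. */
static void write_doorbell(void *p)
{
        struct lu *lu = p;

        /* perform the blocking 1394 write for lu here */
        (void)lu;
}

/* Fast path failed: retry the agent notification from a workqueue. */
static void defer_doorbell(struct lu *lu)
{
        PREPARE_WORK(&lu->protocol_work, write_doorbell, lu);
        schedule_work(&lu->protocol_work);
}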
@ -492,7 +533,7 @@ static int sbp2util_create_command_orb_pool(struct scsi_id_instance_data *scsi_i
command->command_orb_dma = command->command_orb_dma =
pci_map_single(hi->host->pdev, &command->command_orb, pci_map_single(hi->host->pdev, &command->command_orb,
sizeof(struct sbp2_command_orb), sizeof(struct sbp2_command_orb),
PCI_DMA_BIDIRECTIONAL); PCI_DMA_TODEVICE);
SBP2_DMA_ALLOC("single command orb DMA"); SBP2_DMA_ALLOC("single command orb DMA");
command->sge_dma = command->sge_dma =
pci_map_single(hi->host->pdev, pci_map_single(hi->host->pdev,
@ -525,7 +566,7 @@ static void sbp2util_remove_command_orb_pool(struct scsi_id_instance_data *scsi_
/* Release our generic DMA's */ /* Release our generic DMA's */
pci_unmap_single(host->pdev, command->command_orb_dma, pci_unmap_single(host->pdev, command->command_orb_dma,
sizeof(struct sbp2_command_orb), sizeof(struct sbp2_command_orb),
PCI_DMA_BIDIRECTIONAL); PCI_DMA_TODEVICE);
SBP2_DMA_FREE("single command orb DMA"); SBP2_DMA_FREE("single command orb DMA");
pci_unmap_single(host->pdev, command->sge_dma, pci_unmap_single(host->pdev, command->sge_dma,
sizeof(command->scatter_gather_element), sizeof(command->scatter_gather_element),
@ -715,6 +756,7 @@ static int sbp2_remove(struct device *dev)
sbp2scsi_complete_all_commands(scsi_id, DID_NO_CONNECT); sbp2scsi_complete_all_commands(scsi_id, DID_NO_CONNECT);
/* scsi_remove_device() will trigger shutdown functions of SCSI /* scsi_remove_device() will trigger shutdown functions of SCSI
* highlevel drivers which would deadlock if blocked. */ * highlevel drivers which would deadlock if blocked. */
atomic_set(&scsi_id->state, SBP2LU_STATE_IN_SHUTDOWN);
scsi_unblock_requests(scsi_id->scsi_host); scsi_unblock_requests(scsi_id->scsi_host);
} }
sdev = scsi_id->sdev; sdev = scsi_id->sdev;
@ -766,10 +808,12 @@ static int sbp2_update(struct unit_directory *ud)
*/ */
sbp2scsi_complete_all_commands(scsi_id, DID_BUS_BUSY); sbp2scsi_complete_all_commands(scsi_id, DID_BUS_BUSY);
/* Make sure we unblock requests (since this is likely after a bus /* Accept new commands unless there was another bus reset in the
* reset). */ * meantime. */
scsi_unblock_requests(scsi_id->scsi_host); if (hpsb_node_entry_valid(scsi_id->ne)) {
atomic_set(&scsi_id->state, SBP2LU_STATE_RUNNING);
scsi_unblock_requests(scsi_id->scsi_host);
}
return 0; return 0;
} }
@ -794,11 +838,12 @@ static struct scsi_id_instance_data *sbp2_alloc_device(struct unit_directory *ud
scsi_id->speed_code = IEEE1394_SPEED_100; scsi_id->speed_code = IEEE1394_SPEED_100;
scsi_id->max_payload_size = sbp2_speedto_max_payload[IEEE1394_SPEED_100]; scsi_id->max_payload_size = sbp2_speedto_max_payload[IEEE1394_SPEED_100];
scsi_id->status_fifo_addr = CSR1212_INVALID_ADDR_SPACE; scsi_id->status_fifo_addr = CSR1212_INVALID_ADDR_SPACE;
atomic_set(&scsi_id->sbp2_login_complete, 0);
INIT_LIST_HEAD(&scsi_id->sbp2_command_orb_inuse); INIT_LIST_HEAD(&scsi_id->sbp2_command_orb_inuse);
INIT_LIST_HEAD(&scsi_id->sbp2_command_orb_completed); INIT_LIST_HEAD(&scsi_id->sbp2_command_orb_completed);
INIT_LIST_HEAD(&scsi_id->scsi_list); INIT_LIST_HEAD(&scsi_id->scsi_list);
spin_lock_init(&scsi_id->sbp2_command_orb_lock); spin_lock_init(&scsi_id->sbp2_command_orb_lock);
atomic_set(&scsi_id->state, SBP2LU_STATE_RUNNING);
INIT_WORK(&scsi_id->protocol_work, NULL, NULL);
ud->device.driver_data = scsi_id; ud->device.driver_data = scsi_id;
@ -881,11 +926,14 @@ static void sbp2_host_reset(struct hpsb_host *host)
struct scsi_id_instance_data *scsi_id; struct scsi_id_instance_data *scsi_id;
hi = hpsb_get_hostinfo(&sbp2_highlevel, host); hi = hpsb_get_hostinfo(&sbp2_highlevel, host);
if (!hi)
if (hi) { return;
list_for_each_entry(scsi_id, &hi->scsi_ids, scsi_list) list_for_each_entry(scsi_id, &hi->scsi_ids, scsi_list)
if (likely(atomic_read(&scsi_id->state) !=
SBP2LU_STATE_IN_SHUTDOWN)) {
atomic_set(&scsi_id->state, SBP2LU_STATE_IN_RESET);
scsi_block_requests(scsi_id->scsi_host); scsi_block_requests(scsi_id->scsi_host);
} }
} }
/* /*
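The new atomic state field gives each logical unit a small life-cycle: RUNNING in normal operation, IN_RESET between a bus reset and a successful reconnect, IN_SHUTDOWN once removal has started. sbp2_host_reset() only enters IN_RESET and blocks SCSI requests when the unit is not already shutting down, and sbp2_update() only unblocks again if the node entry is still valid, which closes the window in which commands could be queued towards a stale node. A minimal, hedged sketch of those transitions (the LU_* names are illustrative; the actual blocking calls are left to the caller):

#include <asm/atomic.h>

enum { LU_RUNNING, LU_IN_RESET, LU_IN_SHUTDOWN };

static atomic_t lu_state = ATOMIC_INIT(LU_RUNNING);

/* Bus reset seen: stop feeding commands until the reconnect has finished. */
static void lu_bus_reset(void)
{
        if (atomic_read(&lu_state) != LU_IN_SHUTDOWN) {
                atomic_set(&lu_state, LU_IN_RESET);
                /* caller invokes scsi_block_requests() here */
        }
}

/* Reconnect done and the node is still valid: accept commands again. */
static void lu_reconnected(int node_still_valid)
{
        if (node_still_valid) {
                atomic_set(&lu_state, LU_RUNNING);
                /* caller invokes scsi_unblock_requests() here */
        }
}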
@ -970,8 +1018,7 @@ static int sbp2_start_device(struct scsi_id_instance_data *scsi_id)
* connected to the sbp2 device being removed. That host would * connected to the sbp2 device being removed. That host would
* have a certain amount of time to relogin before the sbp2 device * have a certain amount of time to relogin before the sbp2 device
* allows someone else to login instead. One second makes sense. */ * allows someone else to login instead. One second makes sense. */
msleep_interruptible(1000); if (msleep_interruptible(1000)) {
if (signal_pending(current)) {
sbp2_remove_device(scsi_id); sbp2_remove_device(scsi_id);
return -EINTR; return -EINTR;
} }
@ -1036,7 +1083,7 @@ static void sbp2_remove_device(struct scsi_id_instance_data *scsi_id)
scsi_remove_host(scsi_id->scsi_host); scsi_remove_host(scsi_id->scsi_host);
scsi_host_put(scsi_id->scsi_host); scsi_host_put(scsi_id->scsi_host);
} }
flush_scheduled_work();
sbp2util_remove_command_orb_pool(scsi_id); sbp2util_remove_command_orb_pool(scsi_id);
list_del(&scsi_id->scsi_list); list_del(&scsi_id->scsi_list);
@ -1182,17 +1229,14 @@ static int sbp2_query_logins(struct scsi_id_instance_data *scsi_id)
"sbp2 query logins orb", scsi_id->query_logins_orb_dma); "sbp2 query logins orb", scsi_id->query_logins_orb_dma);
memset(scsi_id->query_logins_response, 0, sizeof(struct sbp2_query_logins_response)); memset(scsi_id->query_logins_response, 0, sizeof(struct sbp2_query_logins_response));
memset(&scsi_id->status_block, 0, sizeof(struct sbp2_status_block));
data[0] = ORB_SET_NODE_ID(hi->host->node_id); data[0] = ORB_SET_NODE_ID(hi->host->node_id);
data[1] = scsi_id->query_logins_orb_dma; data[1] = scsi_id->query_logins_orb_dma;
sbp2util_cpu_to_be32_buffer(data, 8); sbp2util_cpu_to_be32_buffer(data, 8);
atomic_set(&scsi_id->sbp2_login_complete, 0);
hpsb_node_write(scsi_id->ne, scsi_id->sbp2_management_agent_addr, data, 8); hpsb_node_write(scsi_id->ne, scsi_id->sbp2_management_agent_addr, data, 8);
if (sbp2util_down_timeout(&scsi_id->sbp2_login_complete, 2*HZ)) { if (sbp2util_access_timeout(scsi_id, 2*HZ)) {
SBP2_INFO("Error querying logins to SBP-2 device - timed out"); SBP2_INFO("Error querying logins to SBP-2 device - timed out");
return -EIO; return -EIO;
} }
@ -1202,11 +1246,8 @@ static int sbp2_query_logins(struct scsi_id_instance_data *scsi_id)
return -EIO; return -EIO;
} }
if (STATUS_GET_RESP(scsi_id->status_block.ORB_offset_hi_misc) || if (STATUS_TEST_RDS(scsi_id->status_block.ORB_offset_hi_misc)) {
STATUS_GET_DEAD_BIT(scsi_id->status_block.ORB_offset_hi_misc) || SBP2_INFO("Error querying logins to SBP-2 device - failed");
STATUS_GET_SBP_STATUS(scsi_id->status_block.ORB_offset_hi_misc)) {
SBP2_INFO("Error querying logins to SBP-2 device - timed out");
return -EIO; return -EIO;
} }
@ -1278,21 +1319,18 @@ static int sbp2_login_device(struct scsi_id_instance_data *scsi_id)
"sbp2 login orb", scsi_id->login_orb_dma); "sbp2 login orb", scsi_id->login_orb_dma);
memset(scsi_id->login_response, 0, sizeof(struct sbp2_login_response)); memset(scsi_id->login_response, 0, sizeof(struct sbp2_login_response));
memset(&scsi_id->status_block, 0, sizeof(struct sbp2_status_block));
data[0] = ORB_SET_NODE_ID(hi->host->node_id); data[0] = ORB_SET_NODE_ID(hi->host->node_id);
data[1] = scsi_id->login_orb_dma; data[1] = scsi_id->login_orb_dma;
sbp2util_cpu_to_be32_buffer(data, 8); sbp2util_cpu_to_be32_buffer(data, 8);
atomic_set(&scsi_id->sbp2_login_complete, 0);
hpsb_node_write(scsi_id->ne, scsi_id->sbp2_management_agent_addr, data, 8); hpsb_node_write(scsi_id->ne, scsi_id->sbp2_management_agent_addr, data, 8);
/* /*
* Wait for login status (up to 20 seconds)... * Wait for login status (up to 20 seconds)...
*/ */
if (sbp2util_down_timeout(&scsi_id->sbp2_login_complete, 20*HZ)) { if (sbp2util_access_timeout(scsi_id, 20*HZ)) {
SBP2_ERR("Error logging into SBP-2 device - login timed-out"); SBP2_ERR("Error logging into SBP-2 device - timed out");
return -EIO; return -EIO;
} }
@ -1300,18 +1338,12 @@ static int sbp2_login_device(struct scsi_id_instance_data *scsi_id)
* Sanity. Make sure status returned matches login orb. * Sanity. Make sure status returned matches login orb.
*/ */
if (scsi_id->status_block.ORB_offset_lo != scsi_id->login_orb_dma) { if (scsi_id->status_block.ORB_offset_lo != scsi_id->login_orb_dma) {
SBP2_ERR("Error logging into SBP-2 device - login timed-out"); SBP2_ERR("Error logging into SBP-2 device - timed out");
return -EIO; return -EIO;
} }
/* if (STATUS_TEST_RDS(scsi_id->status_block.ORB_offset_hi_misc)) {
* Check status SBP2_ERR("Error logging into SBP-2 device - failed");
*/
if (STATUS_GET_RESP(scsi_id->status_block.ORB_offset_hi_misc) ||
STATUS_GET_DEAD_BIT(scsi_id->status_block.ORB_offset_hi_misc) ||
STATUS_GET_SBP_STATUS(scsi_id->status_block.ORB_offset_hi_misc)) {
SBP2_ERR("Error logging into SBP-2 device - login failed");
return -EIO; return -EIO;
} }
@ -1335,9 +1367,7 @@ static int sbp2_login_device(struct scsi_id_instance_data *scsi_id)
scsi_id->sbp2_command_block_agent_addr &= 0x0000ffffffffffffULL; scsi_id->sbp2_command_block_agent_addr &= 0x0000ffffffffffffULL;
SBP2_INFO("Logged into SBP-2 device"); SBP2_INFO("Logged into SBP-2 device");
return 0; return 0;
} }
/* /*
@ -1387,21 +1417,17 @@ static int sbp2_logout_device(struct scsi_id_instance_data *scsi_id)
data[1] = scsi_id->logout_orb_dma; data[1] = scsi_id->logout_orb_dma;
sbp2util_cpu_to_be32_buffer(data, 8); sbp2util_cpu_to_be32_buffer(data, 8);
atomic_set(&scsi_id->sbp2_login_complete, 0);
error = hpsb_node_write(scsi_id->ne, error = hpsb_node_write(scsi_id->ne,
scsi_id->sbp2_management_agent_addr, data, 8); scsi_id->sbp2_management_agent_addr, data, 8);
if (error) if (error)
return error; return error;
/* Wait for device to logout...1 second. */ /* Wait for device to logout...1 second. */
if (sbp2util_down_timeout(&scsi_id->sbp2_login_complete, HZ)) if (sbp2util_access_timeout(scsi_id, HZ))
return -EIO; return -EIO;
SBP2_INFO("Logged out of SBP-2 device"); SBP2_INFO("Logged out of SBP-2 device");
return 0; return 0;
} }
/* /*
@ -1445,20 +1471,10 @@ static int sbp2_reconnect_device(struct scsi_id_instance_data *scsi_id)
sbp2util_packet_dump(scsi_id->reconnect_orb, sizeof(struct sbp2_reconnect_orb), sbp2util_packet_dump(scsi_id->reconnect_orb, sizeof(struct sbp2_reconnect_orb),
"sbp2 reconnect orb", scsi_id->reconnect_orb_dma); "sbp2 reconnect orb", scsi_id->reconnect_orb_dma);
/*
* Initialize status fifo
*/
memset(&scsi_id->status_block, 0, sizeof(struct sbp2_status_block));
/*
* Ok, let's write to the target's management agent register
*/
data[0] = ORB_SET_NODE_ID(hi->host->node_id); data[0] = ORB_SET_NODE_ID(hi->host->node_id);
data[1] = scsi_id->reconnect_orb_dma; data[1] = scsi_id->reconnect_orb_dma;
sbp2util_cpu_to_be32_buffer(data, 8); sbp2util_cpu_to_be32_buffer(data, 8);
atomic_set(&scsi_id->sbp2_login_complete, 0);
error = hpsb_node_write(scsi_id->ne, error = hpsb_node_write(scsi_id->ne,
scsi_id->sbp2_management_agent_addr, data, 8); scsi_id->sbp2_management_agent_addr, data, 8);
if (error) if (error)
@ -1467,8 +1483,8 @@ static int sbp2_reconnect_device(struct scsi_id_instance_data *scsi_id)
/* /*
* Wait for reconnect status (up to 1 second)... * Wait for reconnect status (up to 1 second)...
*/ */
if (sbp2util_down_timeout(&scsi_id->sbp2_login_complete, HZ)) { if (sbp2util_access_timeout(scsi_id, HZ)) {
SBP2_ERR("Error reconnecting to SBP-2 device - reconnect timed-out"); SBP2_ERR("Error reconnecting to SBP-2 device - timed out");
return -EIO; return -EIO;
} }
@ -1476,25 +1492,17 @@ static int sbp2_reconnect_device(struct scsi_id_instance_data *scsi_id)
* Sanity. Make sure status returned matches reconnect orb. * Sanity. Make sure status returned matches reconnect orb.
*/ */
if (scsi_id->status_block.ORB_offset_lo != scsi_id->reconnect_orb_dma) { if (scsi_id->status_block.ORB_offset_lo != scsi_id->reconnect_orb_dma) {
SBP2_ERR("Error reconnecting to SBP-2 device - reconnect timed-out"); SBP2_ERR("Error reconnecting to SBP-2 device - timed out");
return -EIO; return -EIO;
} }
/* if (STATUS_TEST_RDS(scsi_id->status_block.ORB_offset_hi_misc)) {
* Check status SBP2_ERR("Error reconnecting to SBP-2 device - failed");
*/
if (STATUS_GET_RESP(scsi_id->status_block.ORB_offset_hi_misc) ||
STATUS_GET_DEAD_BIT(scsi_id->status_block.ORB_offset_hi_misc) ||
STATUS_GET_SBP_STATUS(scsi_id->status_block.ORB_offset_hi_misc)) {
SBP2_ERR("Error reconnecting to SBP-2 device - reconnect failed");
return -EIO; return -EIO;
} }
HPSB_DEBUG("Reconnected to SBP-2 device"); HPSB_DEBUG("Reconnected to SBP-2 device");
return 0; return 0;
} }
/* /*
@ -1592,11 +1600,6 @@ static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id,
} }
workarounds = sbp2_default_workarounds; workarounds = sbp2_default_workarounds;
if (force_inquiry_hack) {
SBP2_WARN("force_inquiry_hack is deprecated. "
"Use parameter 'workarounds' instead.");
workarounds |= SBP2_WORKAROUND_INQUIRY_36;
}
if (!(workarounds & SBP2_WORKAROUND_OVERRIDE)) if (!(workarounds & SBP2_WORKAROUND_OVERRIDE))
for (i = 0; i < ARRAY_SIZE(sbp2_workarounds_table); i++) { for (i = 0; i < ARRAY_SIZE(sbp2_workarounds_table); i++) {
@ -1705,9 +1708,14 @@ static int sbp2_agent_reset(struct scsi_id_instance_data *scsi_id, int wait)
quadlet_t data; quadlet_t data;
u64 addr; u64 addr;
int retval; int retval;
unsigned long flags;
SBP2_DEBUG_ENTER(); SBP2_DEBUG_ENTER();
cancel_delayed_work(&scsi_id->protocol_work);
if (wait)
flush_scheduled_work();
data = ntohl(SBP2_AGENT_RESET_DATA); data = ntohl(SBP2_AGENT_RESET_DATA);
addr = scsi_id->sbp2_command_block_agent_addr + SBP2_AGENT_RESET_OFFSET; addr = scsi_id->sbp2_command_block_agent_addr + SBP2_AGENT_RESET_OFFSET;
@ -1724,7 +1732,9 @@ static int sbp2_agent_reset(struct scsi_id_instance_data *scsi_id, int wait)
/* /*
* Need to make sure orb pointer is written on next command * Need to make sure orb pointer is written on next command
*/ */
spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags);
scsi_id->last_orb = NULL; scsi_id->last_orb = NULL;
spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
return 0; return 0;
} }
@ -1961,13 +1971,17 @@ static void sbp2_create_command_orb(struct scsi_id_instance_data *scsi_id,
/* /*
* This function is called in order to begin a regular SBP-2 command. * This function is called in order to begin a regular SBP-2 command.
*/ */
static int sbp2_link_orb_command(struct scsi_id_instance_data *scsi_id, static void sbp2_link_orb_command(struct scsi_id_instance_data *scsi_id,
struct sbp2_command_info *command) struct sbp2_command_info *command)
{ {
struct sbp2scsi_host_info *hi = scsi_id->hi; struct sbp2scsi_host_info *hi = scsi_id->hi;
struct sbp2_command_orb *command_orb = &command->command_orb; struct sbp2_command_orb *command_orb = &command->command_orb;
struct node_entry *ne = scsi_id->ne; struct sbp2_command_orb *last_orb;
u64 addr; dma_addr_t last_orb_dma;
u64 addr = scsi_id->sbp2_command_block_agent_addr;
quadlet_t data[2];
size_t length;
unsigned long flags;
outstanding_orb_incr; outstanding_orb_incr;
SBP2_ORB_DEBUG("sending command orb %p, total orbs = %x", SBP2_ORB_DEBUG("sending command orb %p, total orbs = %x",
@ -1975,73 +1989,70 @@ static int sbp2_link_orb_command(struct scsi_id_instance_data *scsi_id,
pci_dma_sync_single_for_device(hi->host->pdev, command->command_orb_dma, pci_dma_sync_single_for_device(hi->host->pdev, command->command_orb_dma,
sizeof(struct sbp2_command_orb), sizeof(struct sbp2_command_orb),
PCI_DMA_BIDIRECTIONAL); PCI_DMA_TODEVICE);
pci_dma_sync_single_for_device(hi->host->pdev, command->sge_dma, pci_dma_sync_single_for_device(hi->host->pdev, command->sge_dma,
sizeof(command->scatter_gather_element), sizeof(command->scatter_gather_element),
PCI_DMA_BIDIRECTIONAL); PCI_DMA_BIDIRECTIONAL);
/* /*
* Check to see if there are any previous orbs to use * Check to see if there are any previous orbs to use
*/ */
if (scsi_id->last_orb == NULL) { spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags);
quadlet_t data[2]; last_orb = scsi_id->last_orb;
last_orb_dma = scsi_id->last_orb_dma;
if (!last_orb) {
/* /*
* Ok, let's write to the target's management agent register * last_orb == NULL means: We know that the target's fetch agent
* is not active right now.
*/ */
addr = scsi_id->sbp2_command_block_agent_addr + SBP2_ORB_POINTER_OFFSET; addr += SBP2_ORB_POINTER_OFFSET;
data[0] = ORB_SET_NODE_ID(hi->host->node_id); data[0] = ORB_SET_NODE_ID(hi->host->node_id);
data[1] = command->command_orb_dma; data[1] = command->command_orb_dma;
sbp2util_cpu_to_be32_buffer(data, 8); sbp2util_cpu_to_be32_buffer(data, 8);
length = 8;
SBP2_ORB_DEBUG("write command agent, command orb %p", command_orb);
if (sbp2util_node_write_no_wait(ne, addr, data, 8) < 0) {
SBP2_ERR("sbp2util_node_write_no_wait failed.\n");
return -EIO;
}
SBP2_ORB_DEBUG("write command agent complete");
scsi_id->last_orb = command_orb;
scsi_id->last_orb_dma = command->command_orb_dma;
} else { } else {
quadlet_t data;
/* /*
* We have an orb already sent (maybe or maybe not * last_orb != NULL means: We know that the target's fetch agent
* processed) that we can append this orb to. So do so, * is (very probably) not dead or in reset state right now.
* and ring the doorbell. Have to be very careful * We have an ORB already sent that we can append a new one to.
* modifying these next orb pointers, as they are accessed * The target's fetch agent may or may not have read this
* both by the sbp2 device and us. * previous ORB yet.
*/ */
scsi_id->last_orb->next_ORB_lo = pci_dma_sync_single_for_cpu(hi->host->pdev, last_orb_dma,
cpu_to_be32(command->command_orb_dma); sizeof(struct sbp2_command_orb),
PCI_DMA_TODEVICE);
last_orb->next_ORB_lo = cpu_to_be32(command->command_orb_dma);
wmb();
/* Tells hardware that this pointer is valid */ /* Tells hardware that this pointer is valid */
scsi_id->last_orb->next_ORB_hi = 0x0; last_orb->next_ORB_hi = 0;
pci_dma_sync_single_for_device(hi->host->pdev, pci_dma_sync_single_for_device(hi->host->pdev, last_orb_dma,
scsi_id->last_orb_dma,
sizeof(struct sbp2_command_orb), sizeof(struct sbp2_command_orb),
PCI_DMA_BIDIRECTIONAL); PCI_DMA_TODEVICE);
addr += SBP2_DOORBELL_OFFSET;
/* data[0] = 0;
* Ring the doorbell length = 4;
*/ }
data = cpu_to_be32(command->command_orb_dma); scsi_id->last_orb = command_orb;
addr = scsi_id->sbp2_command_block_agent_addr + SBP2_DOORBELL_OFFSET; scsi_id->last_orb_dma = command->command_orb_dma;
spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
SBP2_ORB_DEBUG("ring doorbell, command orb %p", command_orb);
SBP2_ORB_DEBUG("write to %s register, command orb %p",
if (sbp2util_node_write_no_wait(ne, addr, &data, 4) < 0) { last_orb ? "DOORBELL" : "ORB_POINTER", command_orb);
SBP2_ERR("sbp2util_node_write_no_wait failed"); if (sbp2util_node_write_no_wait(scsi_id->ne, addr, data, length)) {
return -EIO; /*
} * sbp2util_node_write_no_wait failed. We certainly ran out
* of transaction labels, perhaps just because there were no
scsi_id->last_orb = command_orb; * context switches which gave khpsbpkt a chance to collect
scsi_id->last_orb_dma = command->command_orb_dma; * free tlabels. Try again in non-atomic context. If necessary,
* the workqueue job will sleep to guaranteedly get a tlabel.
* We do not accept new commands until the job is over.
*/
scsi_block_requests(scsi_id->scsi_host);
PREPARE_WORK(&scsi_id->protocol_work,
last_orb ? sbp2util_write_doorbell:
sbp2util_write_orb_pointer,
scsi_id);
schedule_work(&scsi_id->protocol_work);
} }
return 0;
} }
/* /*
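The rewritten dispatch logic distinguishes two cases under sbp2_command_orb_lock. If no ORB is outstanding (last_orb == NULL) the fetch agent is known to be idle, so the new ORB's bus address is simply written to its ORB_POINTER register. Otherwise the previous ORB is patched to point at the new one (next_ORB_lo first, then next_ORB_hi cleared to mark the pointer valid, separated by a write barrier and the DMA syncs) and the DOORBELL register is rung so the agent refetches the chain. A hedged sketch of that decision; write_agent_reg() is a hypothetical stand-in for the 1394 write to the target's command block agent, and the 0x08/0x10 offsets are the SBP-2 ORB_POINTER and DOORBELL registers:

#include <linux/types.h>
#include <asm/byteorder.h>
#include <asm/system.h>

struct orb {
        u32 next_ORB_hi;
        u32 next_ORB_lo;
        /* data descriptor, misc, command block ... */
};

extern void write_agent_reg(u64 offset, u32 *data, size_t len);

static void dispatch_orb(struct orb *last_orb, u32 new_orb_dma, u32 node_id)
{
        u32 data[2];

        if (!last_orb) {
                /* Fetch agent idle: hand it the new ORB directly. */
                data[0] = cpu_to_be32((node_id & 0xffff) << 16);
                data[1] = cpu_to_be32(new_orb_dma);
                write_agent_reg(0x08, data, 8);         /* ORB_POINTER */
        } else {
                /* Agent may still be active: chain behind the previous ORB,
                 * then ring the doorbell so it refetches next_ORB. */
                last_orb->next_ORB_lo = cpu_to_be32(new_orb_dma);
                wmb();
                last_orb->next_ORB_hi = 0;      /* pointer now marked valid */
                data[0] = 0;
                write_agent_reg(0x10, data, 4);         /* DOORBELL */
        }
}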
@ -2077,11 +2088,6 @@ static int sbp2_send_command(struct scsi_id_instance_data *scsi_id,
sbp2util_packet_dump(&command->command_orb, sizeof(struct sbp2_command_orb), sbp2util_packet_dump(&command->command_orb, sizeof(struct sbp2_command_orb),
"sbp2 command orb", command->command_orb_dma); "sbp2 command orb", command->command_orb_dma);
/*
* Initialize status fifo
*/
memset(&scsi_id->status_block, 0, sizeof(struct sbp2_status_block));
/* /*
* Link up the orb, and ring the doorbell if needed * Link up the orb, and ring the doorbell if needed
*/ */
@ -2123,12 +2129,14 @@ static unsigned int sbp2_status_to_sense_data(unchar *sbp2_status, unchar *sense
/* /*
* This function deals with status writes from the SBP-2 device * This function deals with status writes from the SBP-2 device
*/ */
static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid, int destid, static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid,
quadlet_t *data, u64 addr, size_t length, u16 fl) int destid, quadlet_t *data, u64 addr,
size_t length, u16 fl)
{ {
struct sbp2scsi_host_info *hi; struct sbp2scsi_host_info *hi;
struct scsi_id_instance_data *scsi_id = NULL, *scsi_id_tmp; struct scsi_id_instance_data *scsi_id = NULL, *scsi_id_tmp;
struct scsi_cmnd *SCpnt = NULL; struct scsi_cmnd *SCpnt = NULL;
struct sbp2_status_block *sb;
u32 scsi_status = SBP2_SCSI_STATUS_GOOD; u32 scsi_status = SBP2_SCSI_STATUS_GOOD;
struct sbp2_command_info *command; struct sbp2_command_info *command;
unsigned long flags; unsigned long flags;
@ -2137,18 +2145,19 @@ static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid, int dest
sbp2util_packet_dump(data, length, "sbp2 status write by device", (u32)addr); sbp2util_packet_dump(data, length, "sbp2 status write by device", (u32)addr);
if (!host) { if (unlikely(length < 8 || length > sizeof(struct sbp2_status_block))) {
SBP2_ERR("Wrong size of status block");
return RCODE_ADDRESS_ERROR;
}
if (unlikely(!host)) {
SBP2_ERR("host is NULL - this is bad!"); SBP2_ERR("host is NULL - this is bad!");
return RCODE_ADDRESS_ERROR; return RCODE_ADDRESS_ERROR;
} }
hi = hpsb_get_hostinfo(&sbp2_highlevel, host); hi = hpsb_get_hostinfo(&sbp2_highlevel, host);
if (unlikely(!hi)) {
if (!hi) {
SBP2_ERR("host info is NULL - this is bad!"); SBP2_ERR("host info is NULL - this is bad!");
return RCODE_ADDRESS_ERROR; return RCODE_ADDRESS_ERROR;
} }
/* /*
* Find our scsi_id structure by looking at the status fifo address * Find our scsi_id structure by looking at the status fifo address
* written to by the sbp2 device. * written to by the sbp2 device.
@ -2160,32 +2169,35 @@ static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid, int dest
break; break;
} }
} }
if (unlikely(!scsi_id)) {
if (!scsi_id) {
SBP2_ERR("scsi_id is NULL - device is gone?"); SBP2_ERR("scsi_id is NULL - device is gone?");
return RCODE_ADDRESS_ERROR; return RCODE_ADDRESS_ERROR;
} }
/* /*
* Put response into scsi_id status fifo... * Put response into scsi_id status fifo buffer. The first two quadlets
* come in big endian byte order. Often the target writes only a
* truncated status block, minimally the first two quadlets. The rest
* is implied to be zeros.
*/ */
memcpy(&scsi_id->status_block, data, length); sb = &scsi_id->status_block;
memset(sb->command_set_dependent, 0, sizeof(sb->command_set_dependent));
memcpy(sb, data, length);
sbp2util_be32_to_cpu_buffer(sb, 8);
/* /*
* Byte swap first two quadlets (8 bytes) of status for processing * Ignore unsolicited status. Handle command ORB status.
*/ */
sbp2util_be32_to_cpu_buffer(&scsi_id->status_block, 8); if (unlikely(STATUS_GET_SRC(sb->ORB_offset_hi_misc) == 2))
command = NULL;
/* else
* Handle command ORB status here if necessary. First, need to match status with command. command = sbp2util_find_command_for_orb(scsi_id,
*/ sb->ORB_offset_lo);
command = sbp2util_find_command_for_orb(scsi_id, scsi_id->status_block.ORB_offset_lo);
if (command) { if (command) {
SBP2_DEBUG("Found status for command ORB"); SBP2_DEBUG("Found status for command ORB");
pci_dma_sync_single_for_cpu(hi->host->pdev, command->command_orb_dma, pci_dma_sync_single_for_cpu(hi->host->pdev, command->command_orb_dma,
sizeof(struct sbp2_command_orb), sizeof(struct sbp2_command_orb),
PCI_DMA_BIDIRECTIONAL); PCI_DMA_TODEVICE);
pci_dma_sync_single_for_cpu(hi->host->pdev, command->sge_dma, pci_dma_sync_single_for_cpu(hi->host->pdev, command->sge_dma,
sizeof(command->scatter_gather_element), sizeof(command->scatter_gather_element),
PCI_DMA_BIDIRECTIONAL); PCI_DMA_BIDIRECTIONAL);
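The handler now tolerates what real targets do: they often write a truncated status block, minimally the first two quadlets. The incoming write is length-checked (8 to 32 bytes), the command-set-dependent tail of the buffer is zeroed, only length bytes are copied in, and just the first two quadlets are converted from big-endian before the fields are interpreted. A tiny userspace illustration of the copy-and-zero step (the struct layout mirrors the driver's status block but is only for the example):

#include <stdint.h>
#include <string.h>

struct status_block {
        uint32_t ORB_offset_hi_misc;
        uint32_t ORB_offset_lo;
        uint8_t command_set_dependent[24];
};

/* Accept a status write of 8..32 bytes; whatever the target omitted is
 * defined to read back as zero. */
static int store_status(struct status_block *sb, const void *data, size_t len)
{
        if (len < 8 || len > sizeof(*sb))
                return -1;
        memset(sb->command_set_dependent, 0, sizeof(sb->command_set_dependent));
        memcpy(sb, data, len);
        return 0;
}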
@ -2194,7 +2206,12 @@ static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid, int dest
outstanding_orb_decr; outstanding_orb_decr;
/* /*
* Matched status with command, now grab scsi command pointers and check status * Matched status with command, now grab scsi command pointers
* and check status.
*/
/*
* FIXME: If the src field in the status is 1, the ORB DMA must
* not be reused until status for a subsequent ORB is received.
*/ */
SCpnt = command->Current_SCpnt; SCpnt = command->Current_SCpnt;
spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags); spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags);
@ -2202,61 +2219,64 @@ static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid, int dest
spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags); spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
if (SCpnt) { if (SCpnt) {
u32 h = sb->ORB_offset_hi_misc;
u32 r = STATUS_GET_RESP(h);
/* if (r != RESP_STATUS_REQUEST_COMPLETE) {
* See if the target stored any scsi status information SBP2_WARN("resp 0x%x, sbp_status 0x%x",
*/ r, STATUS_GET_SBP_STATUS(h));
if (STATUS_GET_LENGTH(scsi_id->status_block.ORB_offset_hi_misc) > 1) { scsi_status =
/* r == RESP_STATUS_TRANSPORT_FAILURE ?
* Translate SBP-2 status to SCSI sense data SBP2_SCSI_STATUS_BUSY :
*/ SBP2_SCSI_STATUS_COMMAND_TERMINATED;
SBP2_DEBUG("CHECK CONDITION");
scsi_status = sbp2_status_to_sense_data((unchar *)&scsi_id->status_block, SCpnt->sense_buffer);
} }
/* /*
* Check to see if the dead bit is set. If so, we'll have to initiate * See if the target stored any scsi status information.
* a fetch agent reset.
*/ */
if (STATUS_GET_DEAD_BIT(scsi_id->status_block.ORB_offset_hi_misc)) { if (STATUS_GET_LEN(h) > 1) {
SBP2_DEBUG("CHECK CONDITION");
/* scsi_status = sbp2_status_to_sense_data(
* Initiate a fetch agent reset. (unchar *)sb, SCpnt->sense_buffer);
*/ }
SBP2_DEBUG("Dead bit set - initiating fetch agent reset"); /*
* Check to see if the dead bit is set. If so, we'll
* have to initiate a fetch agent reset.
*/
if (STATUS_TEST_DEAD(h)) {
SBP2_DEBUG("Dead bit set - "
"initiating fetch agent reset");
sbp2_agent_reset(scsi_id, 0); sbp2_agent_reset(scsi_id, 0);
} }
SBP2_ORB_DEBUG("completing command orb %p", &command->command_orb); SBP2_ORB_DEBUG("completing command orb %p", &command->command_orb);
} }
/* /*
* Check here to see if there are no commands in-use. If there are none, we can * Check here to see if there are no commands in-use. If there
* null out last orb so that next time around we write directly to the orb pointer... * are none, we know that the fetch agent left the active state
* Quick start saves one 1394 bus transaction. * _and_ that we did not reactivate it yet. Therefore clear
* last_orb so that next time we write directly to the
* ORB_POINTER register. That way the fetch agent does not need
* to refetch the next_ORB.
*/ */
spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags); spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags);
if (list_empty(&scsi_id->sbp2_command_orb_inuse)) { if (list_empty(&scsi_id->sbp2_command_orb_inuse))
scsi_id->last_orb = NULL; scsi_id->last_orb = NULL;
}
spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags); spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
} else { } else {
/* /*
* It's probably a login/logout/reconnect status. * It's probably a login/logout/reconnect status.
*/ */
if ((scsi_id->login_orb_dma == scsi_id->status_block.ORB_offset_lo) || if ((sb->ORB_offset_lo == scsi_id->reconnect_orb_dma) ||
(scsi_id->query_logins_orb_dma == scsi_id->status_block.ORB_offset_lo) || (sb->ORB_offset_lo == scsi_id->login_orb_dma) ||
(scsi_id->reconnect_orb_dma == scsi_id->status_block.ORB_offset_lo) || (sb->ORB_offset_lo == scsi_id->query_logins_orb_dma) ||
(scsi_id->logout_orb_dma == scsi_id->status_block.ORB_offset_lo)) { (sb->ORB_offset_lo == scsi_id->logout_orb_dma)) {
atomic_set(&scsi_id->sbp2_login_complete, 1); scsi_id->access_complete = 1;
wake_up_interruptible(&access_wq);
} }
} }
if (SCpnt) { if (SCpnt) {
/* Complete the SCSI command. */
SBP2_DEBUG("Completing SCSI command"); SBP2_DEBUG("Completing SCSI command");
sbp2scsi_complete_command(scsi_id, scsi_status, SCpnt, sbp2scsi_complete_command(scsi_id, scsi_status, SCpnt,
command->Current_done); command->Current_done);
@@ -2372,7 +2392,7 @@ static void sbp2scsi_complete_all_commands(struct scsi_id_instance_data *scsi_id
 		command = list_entry(lh, struct sbp2_command_info, list);
 		pci_dma_sync_single_for_cpu(hi->host->pdev, command->command_orb_dma,
 					    sizeof(struct sbp2_command_orb),
-					    PCI_DMA_BIDIRECTIONAL);
+					    PCI_DMA_TODEVICE);
 		pci_dma_sync_single_for_cpu(hi->host->pdev, command->sge_dma,
 					    sizeof(command->scatter_gather_element),
 					    PCI_DMA_BIDIRECTIONAL);
@@ -2495,6 +2515,7 @@ static int sbp2scsi_slave_alloc(struct scsi_device *sdev)
 		(struct scsi_id_instance_data *)sdev->host->hostdata[0];

 	scsi_id->sdev = sdev;
+	sdev->allow_restart = 1;

 	if (scsi_id->workarounds & SBP2_WORKAROUND_INQUIRY_36)
 		sdev->inquiry_len = 36;
@@ -2508,16 +2529,12 @@ static int sbp2scsi_slave_configure(struct scsi_device *sdev)
 	blk_queue_dma_alignment(sdev->request_queue, (512 - 1));
 	sdev->use_10_for_rw = 1;
-	sdev->use_10_for_ms = 1;

 	if (sdev->type == TYPE_DISK &&
 	    scsi_id->workarounds & SBP2_WORKAROUND_MODE_SENSE_8)
 		sdev->skip_ms_page_8 = 1;
 	if (scsi_id->workarounds & SBP2_WORKAROUND_FIX_CAPACITY)
 		sdev->fix_capacity = 1;
-	if (scsi_id->ne->guid_vendor_id == 0x0010b9 &&	/* Maxtor's OUI */
-	    (sdev->type == TYPE_DISK || sdev->type == TYPE_RBC))
-		sdev->allow_restart = 1;
 	return 0;
 }
@@ -2555,7 +2572,7 @@ static int sbp2scsi_abort(struct scsi_cmnd *SCpnt)
 			pci_dma_sync_single_for_cpu(hi->host->pdev,
 						    command->command_orb_dma,
 						    sizeof(struct sbp2_command_orb),
-						    PCI_DMA_BIDIRECTIONAL);
+						    PCI_DMA_TODEVICE);
 			pci_dma_sync_single_for_cpu(hi->host->pdev,
 						    command->sge_dma,
 						    sizeof(command->scatter_gather_element),
@@ -2571,7 +2588,7 @@ static int sbp2scsi_abort(struct scsi_cmnd *SCpnt)
 		/*
 		 * Initiate a fetch agent reset.
 		 */
-		sbp2_agent_reset(scsi_id, 0);
+		sbp2_agent_reset(scsi_id, 1);
 		sbp2scsi_complete_all_commands(scsi_id, DID_BUS_BUSY);
 	}
@@ -2590,7 +2607,7 @@ static int sbp2scsi_reset(struct scsi_cmnd *SCpnt)
 	if (sbp2util_node_is_available(scsi_id)) {
 		SBP2_ERR("Generating sbp2 fetch agent reset");
-		sbp2_agent_reset(scsi_id, 0);
+		sbp2_agent_reset(scsi_id, 1);
 	}

 	return SUCCESS;

View file

@@ -46,8 +46,8 @@
 #define ORB_SET_DIRECTION(value)	((value & 0x1) << 27)

 struct sbp2_command_orb {
-	volatile u32 next_ORB_hi;
-	volatile u32 next_ORB_lo;
+	u32 next_ORB_hi;
+	u32 next_ORB_lo;
 	u32 data_descriptor_hi;
 	u32 data_descriptor_lo;
 	u32 misc;
@@ -180,12 +180,14 @@ struct sbp2_unrestricted_page_table {
 #define SBP2_SCSI_STATUS_SELECTION_TIMEOUT	0xff

-#define STATUS_GET_ORB_OFFSET_HI(value)	(value & 0xffff)
-#define STATUS_GET_SBP_STATUS(value)	((value >> 16) & 0xff)
-#define STATUS_GET_LENGTH(value)	((value >> 24) & 0x7)
-#define STATUS_GET_DEAD_BIT(value)	((value >> 27) & 0x1)
-#define STATUS_GET_RESP(value)		((value >> 28) & 0x3)
-#define STATUS_GET_SRC(value)		((value >> 30) & 0x3)
+#define STATUS_GET_SRC(value)		(((value) >> 30) & 0x3)
+#define STATUS_GET_RESP(value)		(((value) >> 28) & 0x3)
+#define STATUS_GET_LEN(value)		(((value) >> 24) & 0x7)
+#define STATUS_GET_SBP_STATUS(value)	(((value) >> 16) & 0xff)
+#define STATUS_GET_ORB_OFFSET_HI(value)	((value) & 0x0000ffff)
+#define STATUS_TEST_DEAD(value)		((value) & 0x08000000)
+/* test 'resp' | 'dead' | 'sbp2_status' */
+#define STATUS_TEST_RDS(value)		((value) & 0x38ff0000)

 struct sbp2_status_block {
 	u32 ORB_offset_hi_misc;
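
For illustration only (not from this patch): the reworked STATUS_* accessors all operate on the first quadlet of the status block, so a caller can decode it roughly as sketched below. The function and printk lines are made up for the example; only the macro semantics come from the definitions above.

	static void example_decode_status(const struct sbp2_status_block *sb)
	{
		u32 h = sb->ORB_offset_hi_misc;

		printk(KERN_DEBUG "sbp2 status: src=%u resp=%u len=%u sbp_status=0x%02x\n",
		       STATUS_GET_SRC(h), STATUS_GET_RESP(h),
		       STATUS_GET_LEN(h), STATUS_GET_SBP_STATUS(h));

		if (STATUS_TEST_DEAD(h))	/* fetch agent halted, needs a reset */
			printk(KERN_DEBUG "sbp2: dead bit set\n");

		if (STATUS_TEST_RDS(h))		/* resp, dead or sbp_status nonzero */
			printk(KERN_DEBUG "sbp2: transport or target error\n");
	}
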
@@ -318,9 +320,9 @@ struct scsi_id_instance_data {
 	u64 status_fifo_addr;

 	/*
-	 * Variable used for logins, reconnects, logouts, query logins
+	 * Waitqueue flag for logins, reconnects, logouts, query logins
 	 */
-	atomic_t sbp2_login_complete;
+	int access_complete:1;

 	/*
 	 * Pool of command orbs, so we can have more than overlapped command per id
@@ -344,6 +346,16 @@ struct scsi_id_instance_data {

 	/* Device specific workarounds/brokeness */
 	unsigned workarounds;
+
+	atomic_t state;
+	struct work_struct protocol_work;
+};
+
+/* For use in scsi_id_instance_data.state */
+enum sbp2lu_state_types {
+	SBP2LU_STATE_RUNNING,		/* all normal */
+	SBP2LU_STATE_IN_RESET,		/* between bus reset and reconnect */
+	SBP2LU_STATE_IN_SHUTDOWN	/* when sbp2_remove was called */
 };

 /* Sbp2 host data structure (one per IEEE1394 host) */
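
Not part of the patch: one plausible use of the new per-unit state field is to keep bus-reset handling and shutdown mutually exclusive before scheduling the deferred protocol work. Purely a sketch under that assumption; the actual call sites live elsewhere in sbp2.c:

	/* hypothetical: only queue deferred work while the unit is still running */
	if (atomic_read(&scsi_id->state) == SBP2LU_STATE_RUNNING) {
		atomic_set(&scsi_id->state, SBP2LU_STATE_IN_RESET);
		schedule_work(&scsi_id->protocol_work);
	}
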
@@ -390,11 +402,6 @@ static int sbp2_logout_device(struct scsi_id_instance_data *scsi_id);
 static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid, int destid,
 				    quadlet_t *data, u64 addr, size_t length, u16 flags);
 static int sbp2_agent_reset(struct scsi_id_instance_data *scsi_id, int wait);
-static int sbp2_link_orb_command(struct scsi_id_instance_data *scsi_id,
-				 struct sbp2_command_info *command);
-static int sbp2_send_command(struct scsi_id_instance_data *scsi_id,
-			     struct scsi_cmnd *SCpnt,
-			     void (*done)(struct scsi_cmnd *));
 static unsigned int sbp2_status_to_sense_data(unchar *sbp2_status,
 					      unchar *sense_data);
 static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id,

View file

@@ -49,16 +49,16 @@
 #include <linux/compat.h>
 #include <linux/cdev.h>

-#include "ieee1394.h"
-#include "ieee1394_types.h"
-#include "hosts.h"
-#include "ieee1394_core.h"
-#include "highlevel.h"
-#include "video1394.h"
-#include "nodemgr.h"
 #include "dma.h"
+#include "highlevel.h"
+#include "hosts.h"
+#include "ieee1394.h"
+#include "ieee1394_core.h"
+#include "ieee1394_hotplug.h"
+#include "ieee1394_types.h"
+#include "nodemgr.h"
 #include "ohci1394.h"
+#include "video1394.h"

 #define ISO_CHANNELS 64
@@ -129,7 +129,7 @@ struct file_ctx {
 #define DBGMSG(card, fmt, args...) \
 	printk(KERN_INFO "video1394_%d: " fmt "\n" , card , ## args)
 #else
-#define DBGMSG(card, fmt, args...)
+#define DBGMSG(card, fmt, args...) do {} while (0)
 #endif

 /* print general (card independent) information */
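
Background for the DBGMSG change, with a standalone example of my own (not from the patch): a debug macro that expands to nothing leaves only a bare ";" behind, which triggers empty-body warnings and does not behave like a single statement, whereas the "do {} while (0)" form always does.

	#include <stdio.h>

	#define DBG_EMPTY(fmt, args...)				/* old style: expands to nothing */
	#define DBG_SAFE(fmt, args...)	do {} while (0)		/* new style: one full statement */

	int main(void)
	{
		int err = 1;

		if (err)
			DBG_EMPTY("failed\n");	/* body is just ';' -> gcc -Wextra warns */
		else
			puts("ok");

		if (err)
			DBG_SAFE("failed\n");	/* reads and compiles like a normal call */
		else
			puts("ok");

		return 0;
	}
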
@@ -1181,7 +1181,8 @@ static int video1394_mmap(struct file *file, struct vm_area_struct *vma)
 	lock_kernel();
 	if (ctx->current_ctx == NULL) {
-		PRINT(KERN_ERR, ctx->ohci->host->id, "Current iso context not set");
+		PRINT(KERN_ERR, ctx->ohci->host->id,
+		      "Current iso context not set");
 	} else
 		res = dma_region_mmap(&ctx->current_ctx->dma, file, vma);
 	unlock_kernel();
@@ -1189,6 +1190,40 @@ static int video1394_mmap(struct file *file, struct vm_area_struct *vma)
 	return res;
 }

+static unsigned int video1394_poll(struct file *file, poll_table *pt)
+{
+	struct file_ctx *ctx;
+	unsigned int mask = 0;
+	unsigned long flags;
+	struct dma_iso_ctx *d;
+	int i;
+
+	lock_kernel();
+	ctx = file->private_data;
+	d = ctx->current_ctx;
+	if (d == NULL) {
+		PRINT(KERN_ERR, ctx->ohci->host->id,
+		      "Current iso context not set");
+		mask = POLLERR;
+		goto done;
+	}
+
+	poll_wait(file, &d->waitq, pt);
+	spin_lock_irqsave(&d->lock, flags);
+	for (i = 0; i < d->num_desc; i++) {
+		if (d->buffer_status[i] == VIDEO1394_BUFFER_READY) {
+			mask |= POLLIN | POLLRDNORM;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&d->lock, flags);
+done:
+	unlock_kernel();
+
+	return mask;
+}
+
 static int video1394_open(struct inode *inode, struct file *file)
 {
 	int i = ieee1394_file_to_instance(file);
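
From userspace, the new poll support lets a client sleep until a buffer becomes ready instead of blocking inside the driver's wait-buffer ioctl. A hedged sketch of such a client (the file descriptor is assumed to be an already-configured video1394 device; the prior channel/buffer ioctl setup is omitted):

	#include <poll.h>
	#include <stdio.h>

	/* returns 1 if a buffer is ready, 0 on timeout, -1 on error */
	int wait_for_frame(int fd)
	{
		struct pollfd pfd = { .fd = fd, .events = POLLIN };
		int ret = poll(&pfd, 1, 1000);	/* 1 second timeout */

		if (ret < 0) {
			perror("poll");
			return -1;
		}
		if (ret == 0)
			return 0;
		/* POLLIN/POLLRDNORM: at least one buffer is VIDEO1394_BUFFER_READY */
		return 1;
	}
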
@@ -1257,6 +1292,7 @@ static struct file_operations video1394_fops=
 #ifdef CONFIG_COMPAT
 	.compat_ioctl = video1394_compat_ioctl,
 #endif
+	.poll = video1394_poll,
 	.mmap = video1394_mmap,
 	.open = video1394_open,
 	.release = video1394_release