diff --git a/Documentation/ABI/testing/sysfs-kernel-slab b/Documentation/ABI/testing/sysfs-kernel-slab new file mode 100644 index 00000000000..6dcf75e594f --- /dev/null +++ b/Documentation/ABI/testing/sysfs-kernel-slab @@ -0,0 +1,479 @@ +What: /sys/kernel/slab +Date: May 2007 +KernelVersion: 2.6.22 +Contact: Pekka Enberg , + Christoph Lameter +Description: + The /sys/kernel/slab directory contains a snapshot of the + internal state of the SLUB allocator for each cache. Certain + files may be modified to change the behavior of the cache (and + any cache it aliases, if any). +Users: kernel memory tuning tools + +What: /sys/kernel/slab/cache/aliases +Date: May 2007 +KernelVersion: 2.6.22 +Contact: Pekka Enberg , + Christoph Lameter +Description: + The aliases file is read-only and specifies how many caches + have merged into this cache. + +What: /sys/kernel/slab/cache/align +Date: May 2007 +KernelVersion: 2.6.22 +Contact: Pekka Enberg , + Christoph Lameter +Description: + The align file is read-only and specifies the cache's object + alignment in bytes. + +What: /sys/kernel/slab/cache/alloc_calls +Date: May 2007 +KernelVersion: 2.6.22 +Contact: Pekka Enberg , + Christoph Lameter +Description: + The alloc_calls file is read-only and lists the kernel code + locations from which allocations for this cache were performed. + The alloc_calls file only contains information if debugging is + enabled for that cache (see Documentation/vm/slub.txt). + +What: /sys/kernel/slab/cache/alloc_fastpath +Date: February 2008 +KernelVersion: 2.6.25 +Contact: Pekka Enberg , + Christoph Lameter +Description: + The alloc_fastpath file is read-only and specifies how many + objects have been allocated using the fast path. + Available when CONFIG_SLUB_STATS is enabled. + +What: /sys/kernel/slab/cache/alloc_from_partial +Date: February 2008 +KernelVersion: 2.6.25 +Contact: Pekka Enberg , + Christoph Lameter +Description: + The alloc_from_partial file is read-only and specifies how + many times a cpu slab has been full and it has been refilled + by using a slab from the list of partially used slabs. + Available when CONFIG_SLUB_STATS is enabled. + +What: /sys/kernel/slab/cache/alloc_refill +Date: February 2008 +KernelVersion: 2.6.25 +Contact: Pekka Enberg , + Christoph Lameter +Description: + The alloc_refill file is read-only and specifies how many + times the per-cpu freelist was empty but there were objects + available as the result of remote cpu frees. + Available when CONFIG_SLUB_STATS is enabled. + +What: /sys/kernel/slab/cache/alloc_slab +Date: February 2008 +KernelVersion: 2.6.25 +Contact: Pekka Enberg , + Christoph Lameter +Description: + The alloc_slab file is read-only and specifies how many times + a new slab had to be allocated from the page allocator. + Available when CONFIG_SLUB_STATS is enabled. + +What: /sys/kernel/slab/cache/alloc_slowpath +Date: February 2008 +KernelVersion: 2.6.25 +Contact: Pekka Enberg , + Christoph Lameter +Description: + The alloc_slowpath file is read-only and specifies how many + objects have been allocated using the slow path because of a + refill or allocation from a partial or new slab. + Available when CONFIG_SLUB_STATS is enabled. + +What: /sys/kernel/slab/cache/cache_dma +Date: May 2007 +KernelVersion: 2.6.22 +Contact: Pekka Enberg , + Christoph Lameter +Description: + The cache_dma file is read-only and specifies whether objects + are from ZONE_DMA. + Available when CONFIG_ZONE_DMA is enabled. 
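Since every attribute above is an ordinary sysfs file, a tuning tool needs nothing beyond plain file I/O to sample a cache. The userspace sketch below is illustrative only and not part of the patch; the cache name "kmalloc-64" is just an example, and attributes such as alloc_fastpath exist only when CONFIG_SLUB_STATS is enabled.

    #include <stdio.h>

    /* Read one numeric attribute of a SLUB cache from sysfs. */
    static long read_slab_attr(const char *cache, const char *attr)
    {
            char path[128];
            long value = -1;
            FILE *f;

            snprintf(path, sizeof(path), "/sys/kernel/slab/%s/%s", cache, attr);
            f = fopen(path, "r");
            if (!f)
                    return -1;      /* e.g. a stats file without CONFIG_SLUB_STATS */
            if (fscanf(f, "%ld", &value) != 1)
                    value = -1;
            fclose(f);
            return value;
    }

    int main(void)
    {
            /* "kmalloc-64" is only an example cache name. */
            printf("align:          %ld\n", read_slab_attr("kmalloc-64", "align"));
            printf("alloc_fastpath: %ld\n", read_slab_attr("kmalloc-64", "alloc_fastpath"));
            return 0;
    }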
+ +What: /sys/kernel/slab/cache/cpu_slabs +Date: May 2007 +KernelVersion: 2.6.22 +Contact: Pekka Enberg , + Christoph Lameter +Description: + The cpu_slabs file is read-only and displays how many cpu slabs + are active and their NUMA locality. + +What: /sys/kernel/slab/cache/cpuslab_flush +Date: April 2009 +KernelVersion: 2.6.31 +Contact: Pekka Enberg , + Christoph Lameter +Description: + The file cpuslab_flush is read-only and specifies how many + times a cache's cpu slabs have been flushed as the result of + destroying or shrinking a cache, a cpu going offline, or as + the result of forcing an allocation from a certain node. + Available when CONFIG_SLUB_STATS is enabled. + +What: /sys/kernel/slab/cache/ctor +Date: May 2007 +KernelVersion: 2.6.22 +Contact: Pekka Enberg , + Christoph Lameter +Description: + The ctor file is read-only and specifies the cache's object + constructor function, which is invoked for each object when a + new slab is allocated. + +What: /sys/kernel/slab/cache/deactivate_empty +Date: February 2008 +KernelVersion: 2.6.25 +Contact: Pekka Enberg , + Christoph Lameter +Description: + The file deactivate_empty is read-only and specifies how many + times an empty cpu slab was deactivated. + Available when CONFIG_SLUB_STATS is enabled. + +What: /sys/kernel/slab/cache/deactivate_full +Date: February 2008 +KernelVersion: 2.6.25 +Contact: Pekka Enberg , + Christoph Lameter +Description: + The file deactivate_full is read-only and specifies how many + times a full cpu slab was deactivated. + Available when CONFIG_SLUB_STATS is enabled. + +What: /sys/kernel/slab/cache/deactivate_remote_frees +Date: February 2008 +KernelVersion: 2.6.25 +Contact: Pekka Enberg , + Christoph Lameter +Description: + The file deactivate_remote_frees is read-only and specifies how + many times a cpu slab has been deactivated and contained free + objects that were freed remotely. + Available when CONFIG_SLUB_STATS is enabled. + +What: /sys/kernel/slab/cache/deactivate_to_head +Date: February 2008 +KernelVersion: 2.6.25 +Contact: Pekka Enberg , + Christoph Lameter +Description: + The file deactivate_to_head is read-only and specifies how + many times a partial cpu slab was deactivated and added to the + head of its node's partial list. + Available when CONFIG_SLUB_STATS is enabled. + +What: /sys/kernel/slab/cache/deactivate_to_tail +Date: February 2008 +KernelVersion: 2.6.25 +Contact: Pekka Enberg , + Christoph Lameter +Description: + The file deactivate_to_tail is read-only and specifies how + many times a partial cpu slab was deactivated and added to the + tail of its node's partial list. + Available when CONFIG_SLUB_STATS is enabled. + +What: /sys/kernel/slab/cache/destroy_by_rcu +Date: May 2007 +KernelVersion: 2.6.22 +Contact: Pekka Enberg , + Christoph Lameter +Description: + The destroy_by_rcu file is read-only and specifies whether + slabs (not objects) are freed by rcu. + +What: /sys/kernel/slab/cache/free_add_partial +Date: February 2008 +KernelVersion: 2.6.25 +Contact: Pekka Enberg , + Christoph Lameter +Description: + The file free_add_partial is read-only and specifies how many + times an object has been freed in a full slab so that it had to + added to its node's partial list. + Available when CONFIG_SLUB_STATS is enabled. 
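The ctor and destroy_by_rcu attributes simply mirror what a subsystem passed to kmem_cache_create() when the cache was created. A minimal in-kernel sketch, assuming a purely hypothetical struct foo and cache name, might look like this; the flags shown correspond to the hwcache_align and destroy_by_rcu files described in this document.

    #include <linux/errno.h>
    #include <linux/init.h>
    #include <linux/slab.h>

    /* Hypothetical object type, used only for illustration. */
    struct foo {
            int state;
    };

    static struct kmem_cache *foo_cache;

    /* Reported by /sys/kernel/slab/<cache>/ctor once the cache exists. */
    static void foo_ctor(void *obj)
    {
            struct foo *f = obj;

            f->state = 0;
    }

    static int __init foo_cache_init(void)
    {
            /*
             * SLAB_HWCACHE_ALIGN and SLAB_DESTROY_BY_RCU show up in the
             * hwcache_align and destroy_by_rcu attributes respectively.
             */
            foo_cache = kmem_cache_create("foo_cache", sizeof(struct foo), 0,
                                          SLAB_HWCACHE_ALIGN | SLAB_DESTROY_BY_RCU,
                                          foo_ctor);
            return foo_cache ? 0 : -ENOMEM;
    }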
+ +What: /sys/kernel/slab/cache/free_calls +Date: May 2007 +KernelVersion: 2.6.22 +Contact: Pekka Enberg , + Christoph Lameter +Description: + The free_calls file is read-only and lists the locations of + object frees if slab debugging is enabled (see + Documentation/vm/slub.txt). + +What: /sys/kernel/slab/cache/free_fastpath +Date: February 2008 +KernelVersion: 2.6.25 +Contact: Pekka Enberg , + Christoph Lameter +Description: + The free_fastpath file is read-only and specifies how many + objects have been freed using the fast path because it was an + object from the cpu slab. + Available when CONFIG_SLUB_STATS is enabled. + +What: /sys/kernel/slab/cache/free_frozen +Date: February 2008 +KernelVersion: 2.6.25 +Contact: Pekka Enberg , + Christoph Lameter +Description: + The free_frozen file is read-only and specifies how many + objects have been freed to a frozen slab (i.e. a remote cpu + slab). + Available when CONFIG_SLUB_STATS is enabled. + +What: /sys/kernel/slab/cache/free_remove_partial +Date: February 2008 +KernelVersion: 2.6.25 +Contact: Pekka Enberg , + Christoph Lameter +Description: + The file free_remove_partial is read-only and specifies how + many times an object has been freed to a now-empty slab so + that it had to be removed from its node's partial list. + Available when CONFIG_SLUB_STATS is enabled. + +What: /sys/kernel/slab/cache/free_slab +Date: February 2008 +KernelVersion: 2.6.25 +Contact: Pekka Enberg , + Christoph Lameter +Description: + The free_slab file is read-only and specifies how many times an + empty slab has been freed back to the page allocator. + Available when CONFIG_SLUB_STATS is enabled. + +What: /sys/kernel/slab/cache/free_slowpath +Date: February 2008 +KernelVersion: 2.6.25 +Contact: Pekka Enberg , + Christoph Lameter +Description: + The free_slowpath file is read-only and specifies how many + objects have been freed using the slow path (i.e. to a full or + partial slab). + Available when CONFIG_SLUB_STATS is enabled. + +What: /sys/kernel/slab/cache/hwcache_align +Date: May 2007 +KernelVersion: 2.6.22 +Contact: Pekka Enberg , + Christoph Lameter +Description: + The hwcache_align file is read-only and specifies whether + objects are aligned on cachelines. + +What: /sys/kernel/slab/cache/min_partial +Date: February 2009 +KernelVersion: 2.6.30 +Contact: Pekka Enberg , + David Rientjes +Description: + The min_partial file specifies how many empty slabs shall + remain on a node's partial list to avoid the overhead of + allocating new slabs. Such slabs may be reclaimed by utilizing + the shrink file. + +What: /sys/kernel/slab/cache/object_size +Date: May 2007 +KernelVersion: 2.6.22 +Contact: Pekka Enberg , + Christoph Lameter +Description: + The object_size file is read-only and specifies the cache's + object size. + +What: /sys/kernel/slab/cache/objects +Date: May 2007 +KernelVersion: 2.6.22 +Contact: Pekka Enberg , + Christoph Lameter +Description: + The objects file is read-only and displays how many objects are + active and from which nodes they are from. + +What: /sys/kernel/slab/cache/objects_partial +Date: April 2008 +KernelVersion: 2.6.26 +Contact: Pekka Enberg , + Christoph Lameter +Description: + The objects_partial file is read-only and displays how many + objects are on partial slabs and from which nodes they are + from. 
+ +What: /sys/kernel/slab/cache/objs_per_slab +Date: May 2007 +KernelVersion: 2.6.22 +Contact: Pekka Enberg , + Christoph Lameter +Description: + The file objs_per_slab is read-only and specifies how many + objects may be allocated from a single slab of the order + specified in /sys/kernel/slab/cache/order. + +What: /sys/kernel/slab/cache/order +Date: May 2007 +KernelVersion: 2.6.22 +Contact: Pekka Enberg , + Christoph Lameter +Description: + The order file specifies the page order at which new slabs are + allocated. It is writable and can be changed to increase the + number of objects per slab. If a slab cannot be allocated + because of fragmentation, SLUB will retry with the minimum order + possible depending on its characteristics. + +What: /sys/kernel/slab/cache/order_fallback +Date: April 2008 +KernelVersion: 2.6.26 +Contact: Pekka Enberg , + Christoph Lameter +Description: + The file order_fallback is read-only and specifies how many + times an allocation of a new slab has not been possible at the + cache's order and instead fallen back to its minimum possible + order. + Available when CONFIG_SLUB_STATS is enabled. + +What: /sys/kernel/slab/cache/partial +Date: May 2007 +KernelVersion: 2.6.22 +Contact: Pekka Enberg , + Christoph Lameter +Description: + The partial file is read-only and displays how long many + partial slabs there are and how long each node's list is. + +What: /sys/kernel/slab/cache/poison +Date: May 2007 +KernelVersion: 2.6.22 +Contact: Pekka Enberg , + Christoph Lameter +Description: + The poison file specifies whether objects should be poisoned + when a new slab is allocated. + +What: /sys/kernel/slab/cache/reclaim_account +Date: May 2007 +KernelVersion: 2.6.22 +Contact: Pekka Enberg , + Christoph Lameter +Description: + The reclaim_account file specifies whether the cache's objects + are reclaimable (and grouped by their mobility). + +What: /sys/kernel/slab/cache/red_zone +Date: May 2007 +KernelVersion: 2.6.22 +Contact: Pekka Enberg , + Christoph Lameter +Description: + The red_zone file specifies whether the cache's objects are red + zoned. + +What: /sys/kernel/slab/cache/remote_node_defrag_ratio +Date: January 2008 +KernelVersion: 2.6.25 +Contact: Pekka Enberg , + Christoph Lameter +Description: + The file remote_node_defrag_ratio specifies the percentage of + times SLUB will attempt to refill the cpu slab with a partial + slab from a remote node as opposed to allocating a new slab on + the local node. This reduces the amount of wasted memory over + the entire system but can be expensive. + Available when CONFIG_NUMA is enabled. + +What: /sys/kernel/slab/cache/sanity_checks +Date: May 2007 +KernelVersion: 2.6.22 +Contact: Pekka Enberg , + Christoph Lameter +Description: + The sanity_checks file specifies whether expensive checks + should be performed on free and, at minimum, enables double free + checks. Caches that enable sanity_checks cannot be merged with + caches that do not. + +What: /sys/kernel/slab/cache/shrink +Date: May 2007 +KernelVersion: 2.6.22 +Contact: Pekka Enberg , + Christoph Lameter +Description: + The shrink file is written when memory should be reclaimed from + a cache. Empty partial slabs are freed and the partial list is + sorted so the slabs with the fewest available objects are used + first. 
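For the writable attributes, a one-line write is all that is needed. The userspace sketch below (not part of the patch) triggers the shrink operation described above; the cache name is again only an example, and root privileges are required. order, min_partial and the debug toggles are written the same way.

    #include <stdio.h>

    /* Ask SLUB to free empty partial slabs of one cache via its shrink file. */
    static int slab_shrink(const char *cache)
    {
            char path[128];
            FILE *f;

            snprintf(path, sizeof(path), "/sys/kernel/slab/%s/shrink", cache);
            f = fopen(path, "w");
            if (!f)
                    return -1;
            fputs("1\n", f);
            return fclose(f);
    }

    int main(void)
    {
            return slab_shrink("dentry") ? 1 : 0;
    }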
+ +What: /sys/kernel/slab/cache/slab_size +Date: May 2007 +KernelVersion: 2.6.22 +Contact: Pekka Enberg , + Christoph Lameter +Description: + The slab_size file is read-only and specifies the object size + with metadata (debugging information and alignment) in bytes. + +What: /sys/kernel/slab/cache/slabs +Date: May 2007 +KernelVersion: 2.6.22 +Contact: Pekka Enberg , + Christoph Lameter +Description: + The slabs file is read-only and displays how long many slabs + there are (both cpu and partial) and from which nodes they are + from. + +What: /sys/kernel/slab/cache/store_user +Date: May 2007 +KernelVersion: 2.6.22 +Contact: Pekka Enberg , + Christoph Lameter +Description: + The store_user file specifies whether the location of + allocation or free should be tracked for a cache. + +What: /sys/kernel/slab/cache/total_objects +Date: April 2008 +KernelVersion: 2.6.26 +Contact: Pekka Enberg , + Christoph Lameter +Description: + The total_objects file is read-only and displays how many total + objects a cache has and from which nodes they are from. + +What: /sys/kernel/slab/cache/trace +Date: May 2007 +KernelVersion: 2.6.22 +Contact: Pekka Enberg , + Christoph Lameter +Description: + The trace file specifies whether object allocations and frees + should be traced. + +What: /sys/kernel/slab/cache/validate +Date: May 2007 +KernelVersion: 2.6.22 +Contact: Pekka Enberg , + Christoph Lameter +Description: + Writing to the validate file causes SLUB to traverse all of its + cache's objects and check the validity of metadata. diff --git a/Documentation/DocBook/kgdb.tmpl b/Documentation/DocBook/kgdb.tmpl index 372dec20c8d..5cff41a5fa7 100644 --- a/Documentation/DocBook/kgdb.tmpl +++ b/Documentation/DocBook/kgdb.tmpl @@ -281,7 +281,7 @@ seriously wrong while debugging, it will most often be the case that you want to enable gdb to be verbose about its target communications. You do this prior to issuing the target - remote command by typing in: set remote debug 1 + remote command by typing in: set debug remote 1 diff --git a/Documentation/filesystems/tmpfs.txt b/Documentation/filesystems/tmpfs.txt index 222437efd75..3015da0c6b2 100644 --- a/Documentation/filesystems/tmpfs.txt +++ b/Documentation/filesystems/tmpfs.txt @@ -133,4 +133,4 @@ RAM/SWAP in 10240 inodes and it is only accessible by root. Author: Christoph Rohland , 1.12.01 Updated: - Hugh Dickins , 4 June 2007 + Hugh Dickins, 4 June 2007 diff --git a/Documentation/hwmon/sysfs-interface b/Documentation/hwmon/sysfs-interface index 2f10ce6a879..004ee161721 100644 --- a/Documentation/hwmon/sysfs-interface +++ b/Documentation/hwmon/sysfs-interface @@ -150,6 +150,11 @@ fan[1-*]_min Fan minimum value Unit: revolution/min (RPM) RW +fan[1-*]_max Fan maximum value + Unit: revolution/min (RPM) + Only rarely supported by the hardware. + RW + fan[1-*]_input Fan input value. Unit: revolution/min (RPM) RO @@ -390,6 +395,7 @@ OR in[0-*]_min_alarm in[0-*]_max_alarm fan[1-*]_min_alarm +fan[1-*]_max_alarm temp[1-*]_min_alarm temp[1-*]_max_alarm temp[1-*]_crit_alarm diff --git a/Documentation/input/multi-touch-protocol.txt b/Documentation/input/multi-touch-protocol.txt index 9f09557aea3..a12ea3b586e 100644 --- a/Documentation/input/multi-touch-protocol.txt +++ b/Documentation/input/multi-touch-protocol.txt @@ -18,8 +18,12 @@ Usage Anonymous finger details are sent sequentially as separate packets of ABS events. Only the ABS_MT events are recognized as part of a finger packet. 
The end of a packet is marked by calling the input_mt_sync() -function, which generates a SYN_MT_REPORT event. The end of multi-touch -transfer is marked by calling the usual input_sync() function. +function, which generates a SYN_MT_REPORT event. This instructs the +receiver to accept the data for the current finger and prepare to receive +another. The end of a multi-touch transfer is marked by calling the usual +input_sync() function. This instructs the receiver to act upon events +accumulated since last EV_SYN/SYN_REPORT and prepare to receive a new +set of events/packets. A set of ABS_MT events with the desired properties is defined. The events are divided into categories, to allow for partial implementation. The @@ -27,11 +31,26 @@ minimum set consists of ABS_MT_TOUCH_MAJOR, ABS_MT_POSITION_X and ABS_MT_POSITION_Y, which allows for multiple fingers to be tracked. If the device supports it, the ABS_MT_WIDTH_MAJOR may be used to provide the size of the approaching finger. Anisotropy and direction may be specified with -ABS_MT_TOUCH_MINOR, ABS_MT_WIDTH_MINOR and ABS_MT_ORIENTATION. Devices with -more granular information may specify general shapes as blobs, i.e., as a -sequence of rectangular shapes grouped together by an -ABS_MT_BLOB_ID. Finally, the ABS_MT_TOOL_TYPE may be used to specify -whether the touching tool is a finger or a pen or something else. +ABS_MT_TOUCH_MINOR, ABS_MT_WIDTH_MINOR and ABS_MT_ORIENTATION. The +ABS_MT_TOOL_TYPE may be used to specify whether the touching tool is a +finger or a pen or something else. Devices with more granular information +may specify general shapes as blobs, i.e., as a sequence of rectangular +shapes grouped together by an ABS_MT_BLOB_ID. Finally, for the few devices +that currently support it, the ABS_MT_TRACKING_ID event may be used to +report finger tracking from hardware [5]. + +Here is what a minimal event sequence for a two-finger touch would look +like: + + ABS_MT_TOUCH_MAJOR + ABS_MT_POSITION_X + ABS_MT_POSITION_Y + SYN_MT_REPORT + ABS_MT_TOUCH_MAJOR + ABS_MT_POSITION_X + ABS_MT_POSITION_Y + SYN_MT_REPORT + SYN_REPORT Event Semantics @@ -44,24 +63,24 @@ ABS_MT_TOUCH_MAJOR The length of the major axis of the contact. The length should be given in surface units. If the surface has an X times Y resolution, the largest -possible value of ABS_MT_TOUCH_MAJOR is sqrt(X^2 + Y^2), the diagonal. +possible value of ABS_MT_TOUCH_MAJOR is sqrt(X^2 + Y^2), the diagonal [4]. ABS_MT_TOUCH_MINOR The length, in surface units, of the minor axis of the contact. If the -contact is circular, this event can be omitted. +contact is circular, this event can be omitted [4]. ABS_MT_WIDTH_MAJOR The length, in surface units, of the major axis of the approaching tool. This should be understood as the size of the tool itself. The orientation of the contact and the approaching tool are assumed to be the -same. +same [4]. ABS_MT_WIDTH_MINOR The length, in surface units, of the minor axis of the approaching -tool. Omit if circular. +tool. Omit if circular [4]. The above four values can be used to derive additional information about the contact. The ratio ABS_MT_TOUCH_MAJOR / ABS_MT_WIDTH_MAJOR approximates @@ -70,14 +89,17 @@ different characteristic widths [1]. ABS_MT_ORIENTATION -The orientation of the ellipse. The value should describe half a revolution -clockwise around the touch center. The scale of the value is arbitrary, but -zero should be returned for an ellipse aligned along the Y axis of the -surface. 
As an example, an index finger placed straight onto the axis could -return zero orientation, something negative when twisted to the left, and -something positive when twisted to the right. This value can be omitted if -the touching object is circular, or if the information is not available in -the kernel driver. +The orientation of the ellipse. The value should describe a signed quarter +of a revolution clockwise around the touch center. The signed value range +is arbitrary, but zero should be returned for a finger aligned along the Y +axis of the surface, a negative value when finger is turned to the left, and +a positive value when finger turned to the right. When completely aligned with +the X axis, the range max should be returned. Orientation can be omitted +if the touching object is circular, or if the information is not available +in the kernel driver. Partial orientation support is possible if the device +can distinguish between the two axis, but not (uniquely) any values in +between. In such cases, the range of ABS_MT_ORIENTATION should be [0, 1] +[4]. ABS_MT_POSITION_X @@ -98,8 +120,35 @@ ABS_MT_BLOB_ID The BLOB_ID groups several packets together into one arbitrarily shaped contact. This is a low-level anonymous grouping, and should not be confused -with the high-level contactID, explained below. Most kernel drivers will -not have this capability, and can safely omit the event. +with the high-level trackingID [5]. Most kernel drivers will not have blob +capability, and can safely omit the event. + +ABS_MT_TRACKING_ID + +The TRACKING_ID identifies an initiated contact throughout its life cycle +[5]. There are currently only a few devices that support it, so this event +should normally be omitted. + + +Event Computation +----------------- + +The flora of different hardware unavoidably leads to some devices fitting +better to the MT protocol than others. To simplify and unify the mapping, +this section gives recipes for how to compute certain events. + +For devices reporting contacts as rectangular shapes, signed orientation +cannot be obtained. Assuming X and Y are the lengths of the sides of the +touching rectangle, here is a simple formula that retains the most +information possible: + + ABS_MT_TOUCH_MAJOR := max(X, Y) + ABS_MT_TOUCH_MINOR := min(X, Y) + ABS_MT_ORIENTATION := bool(X > Y) + +The range of ABS_MT_ORIENTATION should be set to [0, 1], to indicate that +the device can distinguish between a finger along the Y axis (0) and a +finger along the X axis (1). Finger Tracking @@ -109,14 +158,18 @@ The kernel driver should generate an arbitrary enumeration of the set of anonymous contacts currently on the surface. The order in which the packets appear in the event stream is not important. -The process of finger tracking, i.e., to assign a unique contactID to each +The process of finger tracking, i.e., to assign a unique trackingID to each initiated contact on the surface, is left to user space; preferably the -multi-touch X driver [3]. In that driver, the contactID stays the same and +multi-touch X driver [3]. In that driver, the trackingID stays the same and unique until the contact vanishes (when the finger leaves the surface). The problem of assigning a set of anonymous fingers to a set of identified fingers is a euclidian bipartite matching problem at each event update, and relies on a sufficiently rapid update rate. +There are a few devices that support trackingID in hardware. User space can +make use of these native identifiers to reduce bandwidth and cpu usage. 
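To make the Event Computation recipe above concrete, here is a hedged driver-side sketch. Only the per-contact reporting is shown; the input_dev setup and the hardware readout are assumed to exist elsewhere, and the helper names are made up, while input_report_abs(), input_mt_sync() and input_sync() are the interfaces this protocol builds on.

    #include <linux/input.h>
    #include <linux/kernel.h>

    /*
     * Report one contact for hardware that only gives an axis-aligned
     * w x h rectangle at (x, y), using the max/min/bool mapping from
     * the Event Computation section.
     */
    static void report_rect_contact(struct input_dev *dev,
                                    int x, int y, int w, int h)
    {
            input_report_abs(dev, ABS_MT_TOUCH_MAJOR, max(w, h));
            input_report_abs(dev, ABS_MT_TOUCH_MINOR, min(w, h));
            input_report_abs(dev, ABS_MT_ORIENTATION, w > h);
            input_report_abs(dev, ABS_MT_POSITION_X, x);
            input_report_abs(dev, ABS_MT_POSITION_Y, y);
            input_mt_sync(dev);             /* SYN_MT_REPORT: this finger is done */
    }

    /* Call once per frame, after every current contact has been reported. */
    static void report_frame_done(struct input_dev *dev)
    {
            input_sync(dev);                /* SYN_REPORT: act on the whole frame */
    }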
+ + Notes ----- @@ -136,5 +189,7 @@ could be used to derive tilt. time of writing (April 2009), the MT protocol is not yet merged, and the prototype implements finger matching, basic mouse support and two-finger scrolling. The project aims at improving the quality of current multi-touch -functionality available in the synaptics X driver, and in addition +functionality available in the Synaptics X driver, and in addition implement more advanced gestures. +[4] See the section on event computation. +[5] See the section on finger tracking. diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index e87bdbfbcc7..fd5cac01303 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -1535,6 +1535,10 @@ and is between 256 and 4096 characters. It is defined in the file register save and restore. The kernel will only save legacy floating-point registers on task switch. + noxsave [BUGS=X86] Disables x86 extended register state save + and restore using xsave. The kernel will fall back to + enabling legacy floating-point and SSE state. + nohlt [BUGS=ARM,SH] Tells the kernel that the sleep(SH) or wfi(ARM) instruction doesn't work correctly and not to use it. This is also useful when using JTAG debugger. diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt index ec5de02f543..b121c5db707 100644 --- a/Documentation/networking/ip-sysctl.txt +++ b/Documentation/networking/ip-sysctl.txt @@ -1266,13 +1266,22 @@ sctp_rmem - vector of 3 INTEGERs: min, default, max sctp_wmem - vector of 3 INTEGERs: min, default, max See tcp_wmem for a description. -UNDOCUMENTED: /proc/sys/net/core/* - dev_weight FIXME +dev_weight - INTEGER + The maximum number of packets that the kernel can handle on a NAPI + interrupt; it is a per-CPU variable. + + Default: 64 /proc/sys/net/unix/* - max_dgram_qlen FIXME +max_dgram_qlen - INTEGER + The maximum length of the dgram socket receive queue. + + Default: 10 + + +UNDOCUMENTED: /proc/sys/net/irda/* fast_poll_increase FIXME diff --git a/Documentation/sound/alsa/HD-Audio-Models.txt b/Documentation/sound/alsa/HD-Audio-Models.txt index 8eec05bc079..322869fc8a9 100644 --- a/Documentation/sound/alsa/HD-Audio-Models.txt +++ b/Documentation/sound/alsa/HD-Audio-Models.txt @@ -334,6 +334,7 @@ STAC9227/9228/9229/927x ref-no-jd Reference board without HP/Mic jack detection 3stack D965 3stack 5stack D965 5stack + SPDIF + 5stack-no-fp D965 5stack without front panel dell-3stack Dell Dimension E520 dell-bios Fixes with Dell BIOS setup auto BIOS setup (default) diff --git a/Documentation/sound/alsa/Procfile.txt b/Documentation/sound/alsa/Procfile.txt index bba2dbb79d8..cfac20cf9e3 100644 --- a/Documentation/sound/alsa/Procfile.txt +++ b/Documentation/sound/alsa/Procfile.txt @@ -104,6 +104,11 @@ card*/pcm*/xrun_debug When this value is greater than 1, the driver will show the stack trace additionally. This may help the debugging. + Since 2.6.30, this option also enables the hwptr check using + jiffies. This detects spontaneous invalid pointer callback + values, but can lead to too many corrections for (mostly + buggy) hardware that doesn't give smooth pointer updates. + card*/pcm*/sub*/info The general information of this PCM sub-stream.
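The two /proc entries documented in the ip-sysctl hunk above are plain integer files, so adjusting them needs only a write. A userspace sketch, with purely illustrative values rather than recommendations:

    #include <stdio.h>

    static int write_sysctl(const char *path, const char *value)
    {
            FILE *f = fopen(path, "w");

            if (!f)
                    return -1;
            fputs(value, f);
            return fclose(f);
    }

    int main(void)
    {
            /* Let NAPI process more packets per poll on each CPU. */
            write_sysctl("/proc/sys/net/core/dev_weight", "128");
            /* Deeper receive queue for AF_UNIX datagram sockets. */
            write_sysctl("/proc/sys/net/unix/max_dgram_qlen", "20");
            return 0;
    }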
diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt index b716d33912d..c302ddf629a 100644 --- a/Documentation/sysctl/vm.txt +++ b/Documentation/sysctl/vm.txt @@ -39,8 +39,6 @@ Currently, these files are in /proc/sys/vm: - nr_hugepages - nr_overcommit_hugepages - nr_pdflush_threads -- nr_pdflush_threads_min -- nr_pdflush_threads_max - nr_trim_pages (only if CONFIG_MMU=n) - numa_zonelist_order - oom_dump_tasks @@ -469,32 +467,6 @@ The default value is 0. ============================================================== -nr_pdflush_threads_min - -This value controls the minimum number of pdflush threads. - -At boot time, the kernel will create and maintain 'nr_pdflush_threads_min' -threads for the kernel's lifetime. - -The default value is 2. The minimum value you can specify is 1, and -the maximum value is the current setting of 'nr_pdflush_threads_max'. - -See 'nr_pdflush_threads_max' below for more information. - -============================================================== - -nr_pdflush_threads_max - -This value controls the maximum number of pdflush threads that can be -created. The pdflush algorithm will create a new pdflush thread (up to -this maximum) if no pdflush threads have been available for >= 1 second. - -The default value is 8. The minimum value you can specify is the -current value of 'nr_pdflush_threads_min' and the -maximum is 1000. - -============================================================== - overcommit_memory: This value contains a flag that enables memory overcommitment. diff --git a/Documentation/sysfs-rules.txt b/Documentation/sysfs-rules.txt index 6049a2a84dd..5d8bc2cd250 100644 --- a/Documentation/sysfs-rules.txt +++ b/Documentation/sysfs-rules.txt @@ -113,7 +113,7 @@ versions of the sysfs interface. "devices" directory at /sys/subsystem//devices. If /sys/subsystem exists, /sys/bus, /sys/class and /sys/block can be - ignored. If it does not exist, you have always to scan all three + ignored. If it does not exist, you always have to scan all three places, as the kernel is free to move a subsystem from one place to the other, as long as the devices are still reachable by the same subsystem name. 
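The sysfs-rules paragraph above boils down to a simple existence check. A sketch of that lookup order, assuming nothing beyond what the text states:

    #include <stdio.h>
    #include <sys/stat.h>
    #include <sys/types.h>

    int main(void)
    {
            struct stat st;

            if (stat("/sys/subsystem", &st) == 0 && S_ISDIR(st.st_mode)) {
                    /* New layout: one place to look per subsystem. */
                    printf("scan /sys/subsystem/<name>/devices\n");
            } else {
                    /* Old layouts: all three places must be scanned. */
                    printf("scan /sys/bus/<name>/devices\n");
                    printf("scan /sys/class/<name>\n");
                    printf("scan /sys/block\n");
            }
            return 0;
    }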
diff --git a/MAINTAINERS b/MAINTAINERS index 2b349ba4add..cf4abddfc8a 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -434,7 +434,7 @@ F: arch/alpha/ AMD GEODE CS5536 USB DEVICE CONTROLLER DRIVER P: Thomas Dahlmann -M: thomas.dahlmann@amd.com +M: dahlmann.thomas@arcor.de L: linux-geode@lists.infradead.org (moderated for non-subscribers) S: Supported F: drivers/usb/gadget/amd5536udc.* @@ -624,6 +624,7 @@ M: paulius.zaleckas@teltonika.lt L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) T: git git://gitorious.org/linux-gemini/mainline.git S: Maintained +F: arch/arm/mach-gemini/ ARM/EBSA110 MACHINE SUPPORT P: Russell King @@ -650,6 +651,7 @@ P: Paulius Zaleckas M: paulius.zaleckas@teltonika.lt L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) S: Maintained +F: arch/arm/mm/*-fa* ARM/FOOTBRIDGE ARCHITECTURE P: Russell King @@ -1132,17 +1134,17 @@ F: fs/bfs/ F: include/linux/bfs_fs.h BLACKFIN ARCHITECTURE -P: Bryan Wu -M: cooloney@kernel.org +P: Mike Frysinger +M: vapier@gentoo.org L: uclinux-dist-devel@blackfin.uclinux.org W: http://blackfin.uclinux.org S: Supported F: arch/blackfin/ BLACKFIN EMAC DRIVER -P: Bryan Wu -M: cooloney@kernel.org -L: uclinux-dist-devel@blackfin.uclinux.org (subscribers-only) +P: Michael Hennerich +M: michael.hennerich@analog.com +L: uclinux-dist-devel@blackfin.uclinux.org W: http://blackfin.uclinux.org S: Supported F: drivers/net/bfin_mac.* @@ -1150,7 +1152,7 @@ F: drivers/net/bfin_mac.* BLACKFIN RTC DRIVER P: Mike Frysinger M: vapier.adi@gmail.com -L: uclinux-dist-devel@blackfin.uclinux.org (subscribers-only) +L: uclinux-dist-devel@blackfin.uclinux.org W: http://blackfin.uclinux.org S: Supported F: drivers/rtc/rtc-bfin.c @@ -1158,7 +1160,7 @@ F: drivers/rtc/rtc-bfin.c BLACKFIN SERIAL DRIVER P: Sonic Zhang M: sonic.zhang@analog.com -L: uclinux-dist-devel@blackfin.uclinux.org (subscribers-only) +L: uclinux-dist-devel@blackfin.uclinux.org W: http://blackfin.uclinux.org S: Supported F: drivers/serial/bfin_5xx.c @@ -1166,7 +1168,7 @@ F: drivers/serial/bfin_5xx.c BLACKFIN WATCHDOG DRIVER P: Mike Frysinger M: vapier.adi@gmail.com -L: uclinux-dist-devel@blackfin.uclinux.org (subscribers-only) +L: uclinux-dist-devel@blackfin.uclinux.org W: http://blackfin.uclinux.org S: Supported F: drivers/watchdog/bfin_wdt.c @@ -1174,7 +1176,7 @@ F: drivers/watchdog/bfin_wdt.c BLACKFIN I2C TWI DRIVER P: Sonic Zhang M: sonic.zhang@analog.com -L: uclinux-dist-devel@blackfin.uclinux.org (subscribers-only) +L: uclinux-dist-devel@blackfin.uclinux.org W: http://blackfin.uclinux.org/ S: Supported F: drivers/i2c/busses/i2c-bfin-twi.c @@ -1431,6 +1433,14 @@ P: Russell King M: linux@arm.linux.org.uk F: include/linux/clk.h +CISCO FCOE HBA DRIVER +P: Abhijeet Joglekar +M: abjoglek@cisco.com +P: Joe Eykholt +M: jeykholt@cisco.com +L: linux-scsi@vger.kernel.org +S: Supported + CODA FILE SYSTEM P: Jan Harkes M: jaharkes@cs.cmu.edu @@ -1532,6 +1542,13 @@ W: http://www.fi.muni.cz/~kas/cosa/ S: Maintained F: drivers/net/wan/cosa* +CPMAC ETHERNET DRIVER +P: Florian Fainelli +M: florian@openwrt.org +L: netdev@vger.kernel.org +S: Maintained +F: drivers/net/cpmac.c + CPU FREQUENCY DRIVERS P: Dave Jones M: davej@redhat.com @@ -1963,8 +1980,8 @@ F: include/linux/edac.h EDAC-E752X P: Mark Gross -P: Doug Thompson M: mark.gross@intel.com +P: Doug Thompson M: dougthompson@xmission.com L: bluesmoke-devel@lists.sourceforge.net (moderated for non-subscribers) W: bluesmoke.sourceforge.net @@ -2241,7 +2258,7 @@ P: Li Yang M: leoli@freescale.com P: Zhang Wei M: zw@zh-kernel.org -L: 
linuxppc-embedded@ozlabs.org +L: linuxppc-dev@ozlabs.org L: linux-kernel@vger.kernel.org S: Maintained F: drivers/dma/fsldma.* @@ -5579,6 +5596,14 @@ M: ian@mnementh.co.uk S: Maintained F: drivers/mmc/host/tmio_mmc.* +TMPFS (SHMEM FILESYSTEM) +P: Hugh Dickins +M: hugh.dickins@tiscali.co.uk +L: linux-mm@kvack.org +S: Maintained +F: include/linux/shmem_fs.h +F: mm/shmem.c + TPM DEVICE DRIVER P: Debora Velarde M: debora@linux.vnet.ibm.com diff --git a/Makefile b/Makefile index 9b2b58c3b3d..03373bb703c 100644 --- a/Makefile +++ b/Makefile @@ -1,8 +1,8 @@ VERSION = 2 PATCHLEVEL = 6 SUBLEVEL = 30 -EXTRAVERSION = -rc5 -NAME = Vindictive Armadillo +EXTRAVERSION = +NAME = Man-Eating Seals of Antiquity # *DOCUMENTATION* # To see a list of typical targets execute "make help" @@ -533,7 +533,7 @@ endif include $(srctree)/arch/$(SRCARCH)/Makefile -ifneq (CONFIG_FRAME_WARN,0) +ifneq ($(CONFIG_FRAME_WARN),0) KBUILD_CFLAGS += $(call cc-option,-Wframe-larger-than=${CONFIG_FRAME_WARN}) endif diff --git a/arch/alpha/include/asm/percpu.h b/arch/alpha/include/asm/percpu.h index e9e0bb5a23b..06c5c7a4afd 100644 --- a/arch/alpha/include/asm/percpu.h +++ b/arch/alpha/include/asm/percpu.h @@ -1,7 +1,9 @@ #ifndef __ALPHA_PERCPU_H #define __ALPHA_PERCPU_H + #include #include +#include /* * Determine the real variable name from the name visible in the @@ -73,6 +75,28 @@ extern unsigned long __per_cpu_offset[NR_CPUS]; #endif /* SMP */ -#include +#ifdef CONFIG_SMP +#define PER_CPU_BASE_SECTION ".data.percpu" +#else +#define PER_CPU_BASE_SECTION ".data" +#endif + +#ifdef CONFIG_SMP + +#ifdef MODULE +#define PER_CPU_SHARED_ALIGNED_SECTION "" +#else +#define PER_CPU_SHARED_ALIGNED_SECTION ".shared_aligned" +#endif +#define PER_CPU_FIRST_SECTION ".first" + +#else + +#define PER_CPU_SHARED_ALIGNED_SECTION "" +#define PER_CPU_FIRST_SECTION "" + +#endif + +#define PER_CPU_ATTRIBUTES #endif /* __ALPHA_PERCPU_H */ diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index e60ec54df33..9d02cdb15b2 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -273,6 +273,7 @@ config ARCH_EP93XX select HAVE_CLK select COMMON_CLKDEV select ARCH_REQUIRE_GPIOLIB + select ARCH_HAS_HOLES_MEMORYMODEL help This enables support for the Cirrus EP93xx series of CPUs. @@ -976,10 +977,9 @@ config OABI_COMPAT UNPREDICTABLE (in fact it can be predicted that it won't work at all). If in doubt say Y. 
-config ARCH_FLATMEM_HAS_HOLES +config ARCH_HAS_HOLES_MEMORYMODEL bool - default y - depends on FLATMEM + default n # Discontigmem is deprecated config ARCH_DISCONTIGMEM_ENABLE diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c index c6884ba1d5e..3e1714c6523 100644 --- a/arch/arm/common/gic.c +++ b/arch/arm/common/gic.c @@ -253,9 +253,9 @@ void __cpuinit gic_cpu_init(unsigned int gic_nr, void __iomem *base) } #ifdef CONFIG_SMP -void gic_raise_softirq(cpumask_t cpumask, unsigned int irq) +void gic_raise_softirq(const struct cpumask *mask, unsigned int irq) { - unsigned long map = *cpus_addr(cpumask); + unsigned long map = *cpus_addr(*mask); /* this always happens on GIC0 */ writel(map << 16 | irq, gic_data[0].dist_base + GIC_DIST_SOFTINT); diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h index 6116e4893c0..15f8a092b70 100644 --- a/arch/arm/include/asm/assembler.h +++ b/arch/arm/include/asm/assembler.h @@ -114,3 +114,16 @@ .align 3; \ .long 9999b,9001f; \ .previous + +/* + * SMP data memory barrier + */ + .macro smp_dmb +#ifdef CONFIG_SMP +#if __LINUX_ARM_ARCH__ >= 7 + dmb +#elif __LINUX_ARM_ARCH__ == 6 + mcr p15, 0, r0, c7, c10, 5 @ dmb +#endif +#endif + .endm diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h index ee99723b3a6..16b52f39798 100644 --- a/arch/arm/include/asm/atomic.h +++ b/arch/arm/include/asm/atomic.h @@ -44,11 +44,29 @@ static inline void atomic_set(atomic_t *v, int i) : "cc"); } +static inline void atomic_add(int i, atomic_t *v) +{ + unsigned long tmp; + int result; + + __asm__ __volatile__("@ atomic_add\n" +"1: ldrex %0, [%2]\n" +" add %0, %0, %3\n" +" strex %1, %0, [%2]\n" +" teq %1, #0\n" +" bne 1b" + : "=&r" (result), "=&r" (tmp) + : "r" (&v->counter), "Ir" (i) + : "cc"); +} + static inline int atomic_add_return(int i, atomic_t *v) { unsigned long tmp; int result; + smp_mb(); + __asm__ __volatile__("@ atomic_add_return\n" "1: ldrex %0, [%2]\n" " add %0, %0, %3\n" @@ -59,14 +77,34 @@ static inline int atomic_add_return(int i, atomic_t *v) : "r" (&v->counter), "Ir" (i) : "cc"); + smp_mb(); + return result; } +static inline void atomic_sub(int i, atomic_t *v) +{ + unsigned long tmp; + int result; + + __asm__ __volatile__("@ atomic_sub\n" +"1: ldrex %0, [%2]\n" +" sub %0, %0, %3\n" +" strex %1, %0, [%2]\n" +" teq %1, #0\n" +" bne 1b" + : "=&r" (result), "=&r" (tmp) + : "r" (&v->counter), "Ir" (i) + : "cc"); +} + static inline int atomic_sub_return(int i, atomic_t *v) { unsigned long tmp; int result; + smp_mb(); + __asm__ __volatile__("@ atomic_sub_return\n" "1: ldrex %0, [%2]\n" " sub %0, %0, %3\n" @@ -77,6 +115,8 @@ static inline int atomic_sub_return(int i, atomic_t *v) : "r" (&v->counter), "Ir" (i) : "cc"); + smp_mb(); + return result; } @@ -84,6 +124,8 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new) { unsigned long oldval, res; + smp_mb(); + do { __asm__ __volatile__("@ atomic_cmpxchg\n" "ldrex %1, [%2]\n" @@ -95,6 +137,8 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new) : "cc"); } while (res); + smp_mb(); + return oldval; } @@ -135,6 +179,7 @@ static inline int atomic_add_return(int i, atomic_t *v) return val; } +#define atomic_add(i, v) (void) atomic_add_return(i, v) static inline int atomic_sub_return(int i, atomic_t *v) { @@ -148,6 +193,7 @@ static inline int atomic_sub_return(int i, atomic_t *v) return val; } +#define atomic_sub(i, v) (void) atomic_sub_return(i, v) static inline int atomic_cmpxchg(atomic_t *v, int old, int new) { @@ -187,10 +233,8 @@ static 
inline int atomic_add_unless(atomic_t *v, int a, int u) } #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) -#define atomic_add(i, v) (void) atomic_add_return(i, v) -#define atomic_inc(v) (void) atomic_add_return(1, v) -#define atomic_sub(i, v) (void) atomic_sub_return(i, v) -#define atomic_dec(v) (void) atomic_sub_return(1, v) +#define atomic_inc(v) atomic_add(1, v) +#define atomic_dec(v) atomic_sub(1, v) #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0) #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0) @@ -200,11 +244,10 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u) #define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0) -/* Atomic operations are already serializing on ARM */ -#define smp_mb__before_atomic_dec() barrier() -#define smp_mb__after_atomic_dec() barrier() -#define smp_mb__before_atomic_inc() barrier() -#define smp_mb__after_atomic_inc() barrier() +#define smp_mb__before_atomic_dec() smp_mb() +#define smp_mb__after_atomic_dec() smp_mb() +#define smp_mb__before_atomic_inc() smp_mb() +#define smp_mb__after_atomic_inc() smp_mb() #include #endif diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h index cb7a9e97fd7..feaa75f0013 100644 --- a/arch/arm/include/asm/cache.h +++ b/arch/arm/include/asm/cache.h @@ -7,4 +7,20 @@ #define L1_CACHE_SHIFT 5 #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) +/* + * Memory returned by kmalloc() may be used for DMA, so we must make + * sure that all such allocations are cache aligned. Otherwise, + * unrelated code may cause parts of the buffer to be read into the + * cache before the transfer is done, causing old data to be seen by + * the CPU. + */ +#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES + +/* + * With EABI on ARMv5 and above we must have 64-bit aligned slab pointers. + */ +#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) +#define ARCH_SLAB_MINALIGN 8 +#endif + #endif diff --git a/arch/arm/include/asm/flat.h b/arch/arm/include/asm/flat.h index 1d77e51907f..59426a4595c 100644 --- a/arch/arm/include/asm/flat.h +++ b/arch/arm/include/asm/flat.h @@ -5,9 +5,6 @@ #ifndef __ARM_FLAT_H__ #define __ARM_FLAT_H__ -/* An odd number of words will be pushed after this alignment, so - deliberately misalign the value. */ -#define flat_stack_align(sp) sp = (void *)(((unsigned long)(sp) - 4) | 4) #define flat_argvp_envp_on_stack() 1 #define flat_old_ram_flag(flags) (flags) #define flat_reloc_valid(reloc, size) ((reloc) <= (size)) diff --git a/arch/arm/include/asm/hardware/gic.h b/arch/arm/include/asm/hardware/gic.h index 4924914af18..7f34333bb54 100644 --- a/arch/arm/include/asm/hardware/gic.h +++ b/arch/arm/include/asm/hardware/gic.h @@ -36,7 +36,7 @@ void gic_dist_init(unsigned int gic_nr, void __iomem *base, unsigned int irq_start); void gic_cpu_init(unsigned int gic_nr, void __iomem *base); void gic_cascade_irq(unsigned int gic_nr, unsigned int irq); -void gic_raise_softirq(cpumask_t cpumask, unsigned int irq); +void gic_raise_softirq(const struct cpumask *mask, unsigned int irq); #endif #endif diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h index e6eb8a67b80..7b522770f29 100644 --- a/arch/arm/include/asm/page.h +++ b/arch/arm/include/asm/page.h @@ -202,13 +202,6 @@ typedef struct page *pgtable_t; (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) -/* - * With EABI on ARMv5 and above we must have 64-bit aligned slab pointers. 
- */ -#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) -#define ARCH_SLAB_MINALIGN 8 -#endif - #include #endif diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h index fad70da5911..5995935338e 100644 --- a/arch/arm/include/asm/smp.h +++ b/arch/arm/include/asm/smp.h @@ -53,17 +53,12 @@ extern void smp_store_cpu_info(unsigned int cpuid); /* * Raise an IPI cross call on CPUs in callmap. */ -extern void smp_cross_call(cpumask_t callmap); - -/* - * Broadcast a timer interrupt to the other CPUs. - */ -extern void smp_send_timer(void); +extern void smp_cross_call(const struct cpumask *mask); /* * Broadcast a clock event to other CPUs. */ -extern void smp_timer_broadcast(cpumask_t mask); +extern void smp_timer_broadcast(const struct cpumask *mask); /* * Boot a secondary CPU, and assign it the specified idle task. @@ -102,7 +97,8 @@ extern int platform_cpu_kill(unsigned int cpu); extern void platform_cpu_enable(unsigned int cpu); extern void arch_send_call_function_single_ipi(int cpu); -extern void arch_send_call_function_ipi(cpumask_t mask); +extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); +#define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask /* * Local timer interrupt handling function (can be IPI'ed). diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h index bd4dc8ed53d..d65b2f5bf41 100644 --- a/arch/arm/include/asm/system.h +++ b/arch/arm/include/asm/system.h @@ -248,6 +248,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size unsigned int tmp; #endif + smp_mb(); + switch (size) { #if __LINUX_ARM_ARCH__ >= 6 case 1: @@ -307,6 +309,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size __bad_xchg(ptr, size), ret = 0; break; } + smp_mb(); return ret; } @@ -316,6 +319,12 @@ extern void enable_hlt(void); #include +#if __LINUX_ARM_ARCH__ < 6 + +#ifdef CONFIG_SMP +#error "SMP is not supported on this platform" +#endif + /* * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make * them available. @@ -329,6 +338,173 @@ extern void enable_hlt(void); #include #endif +#else /* __LINUX_ARM_ARCH__ >= 6 */ + +extern void __bad_cmpxchg(volatile void *ptr, int size); + +/* + * cmpxchg only support 32-bits operands on ARMv6. 
+ */ + +static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, + unsigned long new, int size) +{ + unsigned long oldval, res; + + switch (size) { +#ifdef CONFIG_CPU_32v6K + case 1: + do { + asm volatile("@ __cmpxchg1\n" + " ldrexb %1, [%2]\n" + " mov %0, #0\n" + " teq %1, %3\n" + " strexbeq %0, %4, [%2]\n" + : "=&r" (res), "=&r" (oldval) + : "r" (ptr), "Ir" (old), "r" (new) + : "memory", "cc"); + } while (res); + break; + case 2: + do { + asm volatile("@ __cmpxchg1\n" + " ldrexh %1, [%2]\n" + " mov %0, #0\n" + " teq %1, %3\n" + " strexheq %0, %4, [%2]\n" + : "=&r" (res), "=&r" (oldval) + : "r" (ptr), "Ir" (old), "r" (new) + : "memory", "cc"); + } while (res); + break; +#endif /* CONFIG_CPU_32v6K */ + case 4: + do { + asm volatile("@ __cmpxchg4\n" + " ldrex %1, [%2]\n" + " mov %0, #0\n" + " teq %1, %3\n" + " strexeq %0, %4, [%2]\n" + : "=&r" (res), "=&r" (oldval) + : "r" (ptr), "Ir" (old), "r" (new) + : "memory", "cc"); + } while (res); + break; + default: + __bad_cmpxchg(ptr, size); + oldval = 0; + } + + return oldval; +} + +static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old, + unsigned long new, int size) +{ + unsigned long ret; + + smp_mb(); + ret = __cmpxchg(ptr, old, new, size); + smp_mb(); + + return ret; +} + +#define cmpxchg(ptr,o,n) \ + ((__typeof__(*(ptr)))__cmpxchg_mb((ptr), \ + (unsigned long)(o), \ + (unsigned long)(n), \ + sizeof(*(ptr)))) + +static inline unsigned long __cmpxchg_local(volatile void *ptr, + unsigned long old, + unsigned long new, int size) +{ + unsigned long ret; + + switch (size) { +#ifndef CONFIG_CPU_32v6K + case 1: + case 2: + ret = __cmpxchg_local_generic(ptr, old, new, size); + break; +#endif /* !CONFIG_CPU_32v6K */ + default: + ret = __cmpxchg(ptr, old, new, size); + } + + return ret; +} + +#define cmpxchg_local(ptr,o,n) \ + ((__typeof__(*(ptr)))__cmpxchg_local((ptr), \ + (unsigned long)(o), \ + (unsigned long)(n), \ + sizeof(*(ptr)))) + +#ifdef CONFIG_CPU_32v6K + +/* + * Note : ARMv7-M (currently unsupported by Linux) does not support + * ldrexd/strexd. If ARMv7-M is ever supported by the Linux kernel, it should + * not be allowed to use __cmpxchg64. 
+ */ +static inline unsigned long long __cmpxchg64(volatile void *ptr, + unsigned long long old, + unsigned long long new) +{ + register unsigned long long oldval asm("r0"); + register unsigned long long __old asm("r2") = old; + register unsigned long long __new asm("r4") = new; + unsigned long res; + + do { + asm volatile( + " @ __cmpxchg8\n" + " ldrexd %1, %H1, [%2]\n" + " mov %0, #0\n" + " teq %1, %3\n" + " teqeq %H1, %H3\n" + " strexdeq %0, %4, %H4, [%2]\n" + : "=&r" (res), "=&r" (oldval) + : "r" (ptr), "Ir" (__old), "r" (__new) + : "memory", "cc"); + } while (res); + + return oldval; +} + +static inline unsigned long long __cmpxchg64_mb(volatile void *ptr, + unsigned long long old, + unsigned long long new) +{ + unsigned long long ret; + + smp_mb(); + ret = __cmpxchg64(ptr, old, new); + smp_mb(); + + return ret; +} + +#define cmpxchg64(ptr,o,n) \ + ((__typeof__(*(ptr)))__cmpxchg64_mb((ptr), \ + (unsigned long long)(o), \ + (unsigned long long)(n))) + +#define cmpxchg64_local(ptr,o,n) \ + ((__typeof__(*(ptr)))__cmpxchg64((ptr), \ + (unsigned long long)(o), \ + (unsigned long long)(n))) + +#else /* !CONFIG_CPU_32v6K */ + +#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) + +#endif /* CONFIG_CPU_32v6K */ + +#endif /* __LINUX_ARM_ARCH__ >= 6 */ + #endif /* __ASSEMBLY__ */ #define arch_align_stack(x) (x) diff --git a/arch/arm/kernel/elf.c b/arch/arm/kernel/elf.c index d4a0da1e48f..950391f194c 100644 --- a/arch/arm/kernel/elf.c +++ b/arch/arm/kernel/elf.c @@ -78,6 +78,15 @@ int arm_elf_read_implies_exec(const struct elf32_hdr *x, int executable_stack) return 1; if (cpu_architecture() < CPU_ARCH_ARMv6) return 1; +#if !defined(CONFIG_AEABI) || defined(CONFIG_OABI_COMPAT) + /* + * If we have support for OABI programs, we can never allow NX + * support - our signal syscall restart mechanism relies upon + * being able to execute code placed on the user stack. + */ + return 1; +#else return 0; +#endif } EXPORT_SYMBOL(arm_elf_read_implies_exec); diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index d662a2f1fd8..83b1da6b7ba 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S @@ -815,10 +815,7 @@ __kuser_helper_start: */ __kuser_memory_barrier: @ 0xffff0fa0 - -#if __LINUX_ARM_ARCH__ >= 6 && defined(CONFIG_SMP) - mcr p15, 0, r0, c7, c10, 5 @ dmb -#endif + smp_dmb usr_ret lr .align 5 diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index 7801aac3c04..6014dfd22af 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -326,14 +326,14 @@ void __init smp_prepare_boot_cpu(void) per_cpu(cpu_data, cpu).idle = current; } -static void send_ipi_message(cpumask_t callmap, enum ipi_msg_type msg) +static void send_ipi_message(const struct cpumask *mask, enum ipi_msg_type msg) { unsigned long flags; unsigned int cpu; local_irq_save(flags); - for_each_cpu_mask(cpu, callmap) { + for_each_cpu(cpu, mask) { struct ipi_data *ipi = &per_cpu(ipi_data, cpu); spin_lock(&ipi->lock); @@ -344,19 +344,19 @@ static void send_ipi_message(cpumask_t callmap, enum ipi_msg_type msg) /* * Call the platform specific cross-CPU call function. 
*/ - smp_cross_call(callmap); + smp_cross_call(mask); local_irq_restore(flags); } -void arch_send_call_function_ipi(cpumask_t mask) +void arch_send_call_function_ipi_mask(const struct cpumask *mask) { send_ipi_message(mask, IPI_CALL_FUNC); } void arch_send_call_function_single_ipi(int cpu) { - send_ipi_message(cpumask_of_cpu(cpu), IPI_CALL_FUNC_SINGLE); + send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE); } void show_ipi_list(struct seq_file *p) @@ -498,17 +498,10 @@ asmlinkage void __exception do_IPI(struct pt_regs *regs) void smp_send_reschedule(int cpu) { - send_ipi_message(cpumask_of_cpu(cpu), IPI_RESCHEDULE); + send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE); } -void smp_send_timer(void) -{ - cpumask_t mask = cpu_online_map; - cpu_clear(smp_processor_id(), mask); - send_ipi_message(mask, IPI_TIMER); -} - -void smp_timer_broadcast(cpumask_t mask) +void smp_timer_broadcast(const struct cpumask *mask) { send_ipi_message(mask, IPI_TIMER); } @@ -517,7 +510,7 @@ void smp_send_stop(void) { cpumask_t mask = cpu_online_map; cpu_clear(smp_processor_id(), mask); - send_ipi_message(mask, IPI_CPU_STOP); + send_ipi_message(&mask, IPI_CPU_STOP); } /* @@ -528,20 +521,17 @@ int setup_profiling_timer(unsigned int multiplier) return -EINVAL; } -static int -on_each_cpu_mask(void (*func)(void *), void *info, int wait, cpumask_t mask) +static void +on_each_cpu_mask(void (*func)(void *), void *info, int wait, + const struct cpumask *mask) { - int ret = 0; - preempt_disable(); - ret = smp_call_function_mask(mask, func, info, wait); - if (cpu_isset(smp_processor_id(), mask)) + smp_call_function_many(mask, func, info, wait); + if (cpumask_test_cpu(smp_processor_id(), mask)) func(info); preempt_enable(); - - return ret; } /**********************************************************************/ @@ -602,20 +592,17 @@ void flush_tlb_all(void) void flush_tlb_mm(struct mm_struct *mm) { - cpumask_t mask = mm->cpu_vm_mask; - - on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, mask); + on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, &mm->cpu_vm_mask); } void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) { - cpumask_t mask = vma->vm_mm->cpu_vm_mask; struct tlb_args ta; ta.ta_vma = vma; ta.ta_start = uaddr; - on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, mask); + on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, &vma->vm_mm->cpu_vm_mask); } void flush_tlb_kernel_page(unsigned long kaddr) @@ -630,14 +617,13 @@ void flush_tlb_kernel_page(unsigned long kaddr) void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { - cpumask_t mask = vma->vm_mm->cpu_vm_mask; struct tlb_args ta; ta.ta_vma = vma; ta.ta_start = start; ta.ta_end = end; - on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, mask); + on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, &vma->vm_mm->cpu_vm_mask); } void flush_tlb_kernel_range(unsigned long start, unsigned long end) diff --git a/arch/arm/lib/bitops.h b/arch/arm/lib/bitops.h index 2e787d40d59..c7f2627385e 100644 --- a/arch/arm/lib/bitops.h +++ b/arch/arm/lib/bitops.h @@ -18,12 +18,14 @@ mov r2, #1 add r1, r1, r0, lsr #3 @ Get byte offset mov r3, r2, lsl r3 @ create mask + smp_dmb 1: ldrexb r2, [r1] ands r0, r2, r3 @ save old value of bit \instr r2, r2, r3 @ toggle bit strexb ip, r2, [r1] cmp ip, #0 bne 1b + smp_dmb cmp r0, #0 movne r0, #1 2: mov pc, lr diff --git a/arch/arm/mach-davinci/include/mach/asp.h b/arch/arm/mach-davinci/include/mach/asp.h new file mode 100644 index 00000000000..e0abc437d79 --- /dev/null +++ b/arch/arm/mach-davinci/include/mach/asp.h @@ -0,0 +1,25 
@@ +/* + * - DaVinci Audio Serial Port support + */ +#ifndef __ASM_ARCH_DAVINCI_ASP_H +#define __ASM_ARCH_DAVINCI_ASP_H + +#include + +/* Bases of register banks */ +#define DAVINCI_ASP0_BASE 0x01E02000 +#define DAVINCI_ASP1_BASE 0x01E04000 + +/* EDMA channels */ +#define DAVINCI_DMA_ASP0_TX 2 +#define DAVINCI_DMA_ASP0_RX 3 +#define DAVINCI_DMA_ASP1_TX 8 +#define DAVINCI_DMA_ASP1_RX 9 + +/* Interrupts */ +#define DAVINCI_ASP0_RX_INT IRQ_MBRINT +#define DAVINCI_ASP0_TX_INT IRQ_MBXINT +#define DAVINCI_ASP1_RX_INT IRQ_MBRINT +#define DAVINCI_ASP1_TX_INT IRQ_MBXINT + +#endif /* __ASM_ARCH_DAVINCI_ASP_H */ diff --git a/arch/arm/mach-ep93xx/clock.c b/arch/arm/mach-ep93xx/clock.c index e8ebeaea6c4..b2eede5531c 100644 --- a/arch/arm/mach-ep93xx/clock.c +++ b/arch/arm/mach-ep93xx/clock.c @@ -21,15 +21,50 @@ #include #include + +/* + * The EP93xx has two external crystal oscillators. To generate the + * required high-frequency clocks, the processor uses two phase-locked- + * loops (PLLs) to multiply the incoming external clock signal to much + * higher frequencies that are then divided down by programmable dividers + * to produce the needed clocks. The PLLs operate independently of one + * another. + */ +#define EP93XX_EXT_CLK_RATE 14745600 +#define EP93XX_EXT_RTC_RATE 32768 + + struct clk { unsigned long rate; int users; + int sw_locked; u32 enable_reg; u32 enable_mask; + + unsigned long (*get_rate)(struct clk *clk); }; -static struct clk clk_uart = { - .rate = 14745600, + +static unsigned long get_uart_rate(struct clk *clk); + + +static struct clk clk_uart1 = { + .sw_locked = 1, + .enable_reg = EP93XX_SYSCON_DEVICE_CONFIG, + .enable_mask = EP93XX_SYSCON_DEVICE_CONFIG_U1EN, + .get_rate = get_uart_rate, +}; +static struct clk clk_uart2 = { + .sw_locked = 1, + .enable_reg = EP93XX_SYSCON_DEVICE_CONFIG, + .enable_mask = EP93XX_SYSCON_DEVICE_CONFIG_U2EN, + .get_rate = get_uart_rate, +}; +static struct clk clk_uart3 = { + .sw_locked = 1, + .enable_reg = EP93XX_SYSCON_DEVICE_CONFIG, + .enable_mask = EP93XX_SYSCON_DEVICE_CONFIG_U3EN, + .get_rate = get_uart_rate, }; static struct clk clk_pll1; static struct clk clk_f; @@ -95,9 +130,9 @@ static struct clk clk_m2m1 = { { .dev_id = dev, .con_id = con, .clk = ck } static struct clk_lookup clocks[] = { - INIT_CK("apb:uart1", NULL, &clk_uart), - INIT_CK("apb:uart2", NULL, &clk_uart), - INIT_CK("apb:uart3", NULL, &clk_uart), + INIT_CK("apb:uart1", NULL, &clk_uart1), + INIT_CK("apb:uart2", NULL, &clk_uart2), + INIT_CK("apb:uart3", NULL, &clk_uart3), INIT_CK(NULL, "pll1", &clk_pll1), INIT_CK(NULL, "fclk", &clk_f), INIT_CK(NULL, "hclk", &clk_h), @@ -125,6 +160,8 @@ int clk_enable(struct clk *clk) u32 value; value = __raw_readl(clk->enable_reg); + if (clk->sw_locked) + __raw_writel(0xaa, EP93XX_SYSCON_SWLOCK); __raw_writel(value | clk->enable_mask, clk->enable_reg); } @@ -138,13 +175,29 @@ void clk_disable(struct clk *clk) u32 value; value = __raw_readl(clk->enable_reg); + if (clk->sw_locked) + __raw_writel(0xaa, EP93XX_SYSCON_SWLOCK); __raw_writel(value & ~clk->enable_mask, clk->enable_reg); } } EXPORT_SYMBOL(clk_disable); +static unsigned long get_uart_rate(struct clk *clk) +{ + u32 value; + + value = __raw_readl(EP93XX_SYSCON_CLOCK_CONTROL); + if (value & EP93XX_SYSCON_CLOCK_UARTBAUD) + return EP93XX_EXT_CLK_RATE; + else + return EP93XX_EXT_CLK_RATE / 2; +} + unsigned long clk_get_rate(struct clk *clk) { + if (clk->get_rate) + return clk->get_rate(clk); + return clk->rate; } EXPORT_SYMBOL(clk_get_rate); @@ -162,7 +215,7 @@ static unsigned long calc_pll_rate(u32 
config_word) unsigned long long rate; int i; - rate = 14745600; + rate = EP93XX_EXT_CLK_RATE; rate *= ((config_word >> 11) & 0x1f) + 1; /* X1FBD */ rate *= ((config_word >> 5) & 0x3f) + 1; /* X2FBD */ do_div(rate, (config_word & 0x1f) + 1); /* X2IPD */ @@ -195,7 +248,7 @@ static int __init ep93xx_clock_init(void) value = __raw_readl(EP93XX_SYSCON_CLOCK_SET1); if (!(value & 0x00800000)) { /* PLL1 bypassed? */ - clk_pll1.rate = 14745600; + clk_pll1.rate = EP93XX_EXT_CLK_RATE; } else { clk_pll1.rate = calc_pll_rate(value); } @@ -206,7 +259,7 @@ static int __init ep93xx_clock_init(void) value = __raw_readl(EP93XX_SYSCON_CLOCK_SET2); if (!(value & 0x00080000)) { /* PLL2 bypassed? */ - clk_pll2.rate = 14745600; + clk_pll2.rate = EP93XX_EXT_CLK_RATE; } else if (value & 0x00040000) { /* PLL2 enabled? */ clk_pll2.rate = calc_pll_rate(value); } else { diff --git a/arch/arm/mach-ep93xx/include/mach/ep93xx-regs.h b/arch/arm/mach-ep93xx/include/mach/ep93xx-regs.h index f66be12b856..1732de7629a 100644 --- a/arch/arm/mach-ep93xx/include/mach/ep93xx-regs.h +++ b/arch/arm/mach-ep93xx/include/mach/ep93xx-regs.h @@ -159,7 +159,10 @@ #define EP93XX_SYSCON_CLOCK_SET1 EP93XX_SYSCON_REG(0x20) #define EP93XX_SYSCON_CLOCK_SET2 EP93XX_SYSCON_REG(0x24) #define EP93XX_SYSCON_DEVICE_CONFIG EP93XX_SYSCON_REG(0x80) -#define EP93XX_SYSCON_DEVICE_CONFIG_CRUNCH_ENABLE 0x00800000 +#define EP93XX_SYSCON_DEVICE_CONFIG_U3EN (1<<24) +#define EP93XX_SYSCON_DEVICE_CONFIG_CRUNCH_ENABLE (1<<23) +#define EP93XX_SYSCON_DEVICE_CONFIG_U2EN (1<<20) +#define EP93XX_SYSCON_DEVICE_CONFIG_U1EN (1<<18) #define EP93XX_SYSCON_SWLOCK EP93XX_SYSCON_REG(0xc0) #define EP93XX_WATCHDOG_BASE (EP93XX_APB_VIRT_BASE + 0x00140000) diff --git a/arch/arm/mach-gemini/include/mach/hardware.h b/arch/arm/mach-gemini/include/mach/hardware.h index de6752674c0..213a4fcfeb1 100644 --- a/arch/arm/mach-gemini/include/mach/hardware.h +++ b/arch/arm/mach-gemini/include/mach/hardware.h @@ -15,10 +15,9 @@ /* * Memory Map definitions */ -/* FIXME: Does it really swap SRAM like this? 
*/ #ifdef CONFIG_GEMINI_MEM_SWAP # define GEMINI_DRAM_BASE 0x00000000 -# define GEMINI_SRAM_BASE 0x20000000 +# define GEMINI_SRAM_BASE 0x70000000 #else # define GEMINI_SRAM_BASE 0x00000000 # define GEMINI_DRAM_BASE 0x10000000 diff --git a/arch/arm/mach-integrator/core.c b/arch/arm/mach-integrator/core.c index 6f887291307..a0f60e55da6 100644 --- a/arch/arm/mach-integrator/core.c +++ b/arch/arm/mach-integrator/core.c @@ -121,7 +121,7 @@ static struct clk uartclk = { .rate = 14745600, }; -static struct clk_lookup lookups[] __initdata = { +static struct clk_lookup lookups[] = { { /* UART0 */ .dev_id = "mb:16", .clk = &uartclk, diff --git a/arch/arm/mach-ixp4xx/ixp4xx_npe.c b/arch/arm/mach-ixp4xx/ixp4xx_npe.c index 25231023490..7bb8e778e4b 100644 --- a/arch/arm/mach-ixp4xx/ixp4xx_npe.c +++ b/arch/arm/mach-ixp4xx/ixp4xx_npe.c @@ -714,7 +714,7 @@ static int __init npe_init_module(void) } if (!found) - return -ENOSYS; + return -ENODEV; return 0; } diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c index eeb00240d78..be1ca28fed3 100644 --- a/arch/arm/mach-kirkwood/common.c +++ b/arch/arm/mach-kirkwood/common.c @@ -144,6 +144,9 @@ static struct platform_device kirkwood_ge00 = { .id = 0, .num_resources = 1, .resource = kirkwood_ge00_resources, + .dev = { + .coherent_dma_mask = 0xffffffff, + }, }; void __init kirkwood_ge00_init(struct mv643xx_eth_platform_data *eth_data) @@ -202,6 +205,9 @@ static struct platform_device kirkwood_ge01 = { .id = 1, .num_resources = 1, .resource = kirkwood_ge01_resources, + .dev = { + .coherent_dma_mask = 0xffffffff, + }, }; void __init kirkwood_ge01_init(struct mv643xx_eth_platform_data *eth_data) @@ -386,12 +392,10 @@ static struct mv64xxx_i2c_pdata kirkwood_i2c_pdata = { static struct resource kirkwood_i2c_resources[] = { { - .name = "i2c", .start = I2C_PHYS_BASE, .end = I2C_PHYS_BASE + 0x1f, .flags = IORESOURCE_MEM, }, { - .name = "i2c", .start = IRQ_KIRKWOOD_TWSI, .end = IRQ_KIRKWOOD_TWSI, .flags = IORESOURCE_IRQ, diff --git a/arch/arm/mach-kirkwood/ts219-setup.c b/arch/arm/mach-kirkwood/ts219-setup.c index dda5743cf3e..01aa213c0a6 100644 --- a/arch/arm/mach-kirkwood/ts219-setup.c +++ b/arch/arm/mach-kirkwood/ts219-setup.c @@ -142,6 +142,8 @@ static unsigned int qnap_ts219_mpp_config[] __initdata = { MPP1_SPI_MOSI, MPP2_SPI_SCK, MPP3_SPI_MISO, + MPP4_SATA1_ACTn, + MPP5_SATA0_ACTn, MPP8_TW_SDA, MPP9_TW_SCK, MPP10_UART0_TXD, @@ -150,10 +152,6 @@ static unsigned int qnap_ts219_mpp_config[] __initdata = { MPP14_UART1_RXD, /* PIC controller */ MPP15_GPIO, /* USB Copy button */ MPP16_GPIO, /* Reset button */ - MPP20_SATA1_ACTn, - MPP21_SATA0_ACTn, - MPP22_SATA1_PRESENTn, - MPP23_SATA0_PRESENTn, 0 }; diff --git a/arch/arm/mach-l7200/include/mach/sys-clock.h b/arch/arm/mach-l7200/include/mach/sys-clock.h index 2d7722be60e..e9729a35751 100644 --- a/arch/arm/mach-l7200/include/mach/sys-clock.h +++ b/arch/arm/mach-l7200/include/mach/sys-clock.h @@ -18,7 +18,7 @@ /* IO_START and IO_BASE are defined in hardware.h */ -#define SYS_CLOCK_START (IO_START + SYS_CLCOK_OFF) /* Physical address */ +#define SYS_CLOCK_START (IO_START + SYS_CLOCK_OFF) /* Physical address */ #define SYS_CLOCK_BASE (IO_BASE + SYS_CLOCK_OFF) /* Virtual address */ /* Define the interface to the SYS_CLOCK */ diff --git a/arch/arm/mach-loki/common.c b/arch/arm/mach-loki/common.c index c0d2d9d12e7..818f19d7ab1 100644 --- a/arch/arm/mach-loki/common.c +++ b/arch/arm/mach-loki/common.c @@ -82,6 +82,9 @@ static struct platform_device loki_ge0 = { .id = 0, .num_resources = 1, .resource = 
loki_ge0_resources, + .dev = { + .coherent_dma_mask = 0xffffffff, + }, }; void __init loki_ge0_init(struct mv643xx_eth_platform_data *eth_data) @@ -136,6 +139,9 @@ static struct platform_device loki_ge1 = { .id = 1, .num_resources = 1, .resource = loki_ge1_resources, + .dev = { + .coherent_dma_mask = 0xffffffff, + }, }; void __init loki_ge1_init(struct mv643xx_eth_platform_data *eth_data) diff --git a/arch/arm/mach-mmp/include/mach/mfp-pxa168.h b/arch/arm/mach-mmp/include/mach/mfp-pxa168.h index d0bdb6e3682..2e914649b9e 100644 --- a/arch/arm/mach-mmp/include/mach/mfp-pxa168.h +++ b/arch/arm/mach-mmp/include/mach/mfp-pxa168.h @@ -3,6 +3,11 @@ #include +#define MFP_DRIVE_VERY_SLOW (0x0 << 13) +#define MFP_DRIVE_SLOW (0x1 << 13) +#define MFP_DRIVE_MEDIUM (0x2 << 13) +#define MFP_DRIVE_FAST (0x3 << 13) + /* GPIO */ #define GPIO0_GPIO MFP_CFG(GPIO0, AF5) #define GPIO1_GPIO MFP_CFG(GPIO1, AF5) diff --git a/arch/arm/mach-mmp/include/mach/mfp-pxa910.h b/arch/arm/mach-mmp/include/mach/mfp-pxa910.h index 48a1cbc7c56..d97de36c50a 100644 --- a/arch/arm/mach-mmp/include/mach/mfp-pxa910.h +++ b/arch/arm/mach-mmp/include/mach/mfp-pxa910.h @@ -3,6 +3,11 @@ #include +#define MFP_DRIVE_VERY_SLOW (0x0 << 13) +#define MFP_DRIVE_SLOW (0x2 << 13) +#define MFP_DRIVE_MEDIUM (0x4 << 13) +#define MFP_DRIVE_FAST (0x8 << 13) + /* UART2 */ #define GPIO47_UART2_RXD MFP_CFG(GPIO47, AF6) #define GPIO48_UART2_TXD MFP_CFG(GPIO48, AF6) diff --git a/arch/arm/mach-mmp/include/mach/mfp.h b/arch/arm/mach-mmp/include/mach/mfp.h index 277ea4cd0f9..62e510e80a5 100644 --- a/arch/arm/mach-mmp/include/mach/mfp.h +++ b/arch/arm/mach-mmp/include/mach/mfp.h @@ -12,16 +12,13 @@ * possible, we make the following compromise: * * 1. SLEEP_OE_N will always be programmed to '1' (by MFP_LPM_FLOAT) - * 2. DRIVE strength definitions redefined to include the reserved bit10 + * 2. DRIVE strength definitions redefined to include the reserved bit + * - the reserved bit differs between pxa168 and pxa910, and the + * MFP_DRIVE_* macros are individually defined in mfp-pxa{168,910}.h * 3. Override MFP_CFG() and MFP_CFG_DRV() * 4. 
Drop the use of MFP_CFG_LPM() and MFP_CFG_X() */ -#define MFP_DRIVE_VERY_SLOW (0x0 << 13) -#define MFP_DRIVE_SLOW (0x2 << 13) -#define MFP_DRIVE_MEDIUM (0x4 << 13) -#define MFP_DRIVE_FAST (0x8 << 13) - #undef MFP_CFG #undef MFP_CFG_DRV #undef MFP_CFG_LPM diff --git a/arch/arm/mach-mmp/time.c b/arch/arm/mach-mmp/time.c index b03a6eda741..a8400bb891e 100644 --- a/arch/arm/mach-mmp/time.c +++ b/arch/arm/mach-mmp/time.c @@ -136,7 +136,7 @@ static struct clock_event_device ckevt = { .set_mode = timer_set_mode, }; -static cycle_t clksrc_read(void) +static cycle_t clksrc_read(struct clocksource *cs) { return timer_read(); } diff --git a/arch/arm/mach-mv78xx0/common.c b/arch/arm/mach-mv78xx0/common.c index 9ba595083da..1b22e4af879 100644 --- a/arch/arm/mach-mv78xx0/common.c +++ b/arch/arm/mach-mv78xx0/common.c @@ -321,6 +321,9 @@ static struct platform_device mv78xx0_ge00 = { .id = 0, .num_resources = 1, .resource = mv78xx0_ge00_resources, + .dev = { + .coherent_dma_mask = 0xffffffff, + }, }; void __init mv78xx0_ge00_init(struct mv643xx_eth_platform_data *eth_data) @@ -375,6 +378,9 @@ static struct platform_device mv78xx0_ge01 = { .id = 1, .num_resources = 1, .resource = mv78xx0_ge01_resources, + .dev = { + .coherent_dma_mask = 0xffffffff, + }, }; void __init mv78xx0_ge01_init(struct mv643xx_eth_platform_data *eth_data) @@ -429,6 +435,9 @@ static struct platform_device mv78xx0_ge10 = { .id = 2, .num_resources = 1, .resource = mv78xx0_ge10_resources, + .dev = { + .coherent_dma_mask = 0xffffffff, + }, }; void __init mv78xx0_ge10_init(struct mv643xx_eth_platform_data *eth_data) @@ -496,6 +505,9 @@ static struct platform_device mv78xx0_ge11 = { .id = 3, .num_resources = 1, .resource = mv78xx0_ge11_resources, + .dev = { + .coherent_dma_mask = 0xffffffff, + }, }; void __init mv78xx0_ge11_init(struct mv643xx_eth_platform_data *eth_data) @@ -532,12 +544,10 @@ static struct mv64xxx_i2c_pdata mv78xx0_i2c_0_pdata = { static struct resource mv78xx0_i2c_0_resources[] = { { - .name = "i2c 0 base", .start = I2C_0_PHYS_BASE, .end = I2C_0_PHYS_BASE + 0x1f, .flags = IORESOURCE_MEM, }, { - .name = "i2c 0 irq", .start = IRQ_MV78XX0_I2C_0, .end = IRQ_MV78XX0_I2C_0, .flags = IORESOURCE_IRQ, @@ -567,12 +577,10 @@ static struct mv64xxx_i2c_pdata mv78xx0_i2c_1_pdata = { static struct resource mv78xx0_i2c_1_resources[] = { { - .name = "i2c 1 base", .start = I2C_1_PHYS_BASE, .end = I2C_1_PHYS_BASE + 0x1f, .flags = IORESOURCE_MEM, }, { - .name = "i2c 1 irq", .start = IRQ_MV78XX0_I2C_1, .end = IRQ_MV78XX0_I2C_1, .flags = IORESOURCE_IRQ, diff --git a/arch/arm/mach-mx2/clock_imx21.c b/arch/arm/mach-mx2/clock_imx21.c index 999d013e06e..e4b08ca804e 100644 --- a/arch/arm/mach-mx2/clock_imx21.c +++ b/arch/arm/mach-mx2/clock_imx21.c @@ -890,7 +890,7 @@ static struct clk clko_clk = { .con_id = n, \ .clk = &c, \ }, -static struct clk_lookup lookups[] __initdata = { +static struct clk_lookup lookups[] = { /* It's unlikely that any driver wants one of them directly: _REGISTER_CLOCK(NULL, "ckih", ckih_clk) _REGISTER_CLOCK(NULL, "ckil", ckil_clk) diff --git a/arch/arm/mach-mx2/clock_imx27.c b/arch/arm/mach-mx2/clock_imx27.c index 3f7280c490f..2c971442f3f 100644 --- a/arch/arm/mach-mx2/clock_imx27.c +++ b/arch/arm/mach-mx2/clock_imx27.c @@ -621,7 +621,7 @@ DEFINE_CLOCK1(csi_clk, 0, 0, 0, parent, &csi_clk1, &per4_clk); .clk = &c, \ }, -static struct clk_lookup lookups[] __initdata = { +static struct clk_lookup lookups[] = { _REGISTER_CLOCK("imx-uart.0", NULL, uart1_clk) _REGISTER_CLOCK("imx-uart.1", NULL, uart2_clk) 
_REGISTER_CLOCK("imx-uart.2", NULL, uart3_clk) diff --git a/arch/arm/mach-mx3/clock-imx35.c b/arch/arm/mach-mx3/clock-imx35.c index 53a112d4e04..3c1e06f56dd 100644 --- a/arch/arm/mach-mx3/clock-imx35.c +++ b/arch/arm/mach-mx3/clock-imx35.c @@ -404,7 +404,7 @@ DEFINE_CLOCK(gpu2d_clk, 0, CCM_CGR3, 4, NULL, NULL); .clk = &c, \ }, -static struct clk_lookup lookups[] __initdata = { +static struct clk_lookup lookups[] = { _REGISTER_CLOCK(NULL, "asrc", asrc_clk) _REGISTER_CLOCK(NULL, "ata", ata_clk) _REGISTER_CLOCK(NULL, "audmux", audmux_clk) diff --git a/arch/arm/mach-mx3/clock.c b/arch/arm/mach-mx3/clock.c index 9957a11533a..a68fcf981ed 100644 --- a/arch/arm/mach-mx3/clock.c +++ b/arch/arm/mach-mx3/clock.c @@ -516,7 +516,7 @@ DEFINE_CLOCK(ipg_clk, 0, NULL, 0, ipg_get_rate, NULL, &ahb_clk); .clk = &c, \ }, -static struct clk_lookup lookups[] __initdata = { +static struct clk_lookup lookups[] = { _REGISTER_CLOCK(NULL, "emi", emi_clk) _REGISTER_CLOCK(NULL, "cspi", cspi1_clk) _REGISTER_CLOCK(NULL, "cspi", cspi2_clk) diff --git a/arch/arm/mach-omap2/clock24xx.c b/arch/arm/mach-omap2/clock24xx.c index efc59c49341..e4cef333e29 100644 --- a/arch/arm/mach-omap2/clock24xx.c +++ b/arch/arm/mach-omap2/clock24xx.c @@ -103,10 +103,10 @@ static struct omap_clk omap24xx_clks[] = { CLK(NULL, "mdm_ick", &mdm_ick, CK_243X), CLK(NULL, "mdm_osc_ck", &mdm_osc_ck, CK_243X), /* DSS domain clocks */ - CLK(NULL, "dss_ick", &dss_ick, CK_243X | CK_242X), - CLK(NULL, "dss1_fck", &dss1_fck, CK_243X | CK_242X), - CLK(NULL, "dss2_fck", &dss2_fck, CK_243X | CK_242X), - CLK(NULL, "dss_54m_fck", &dss_54m_fck, CK_243X | CK_242X), + CLK("omapfb", "ick", &dss_ick, CK_243X | CK_242X), + CLK("omapfb", "dss1_fck", &dss1_fck, CK_243X | CK_242X), + CLK("omapfb", "dss2_fck", &dss2_fck, CK_243X | CK_242X), + CLK("omapfb", "tv_fck", &dss_54m_fck, CK_243X | CK_242X), /* L3 domain clocks */ CLK(NULL, "core_l3_ck", &core_l3_ck, CK_243X | CK_242X), CLK(NULL, "ssi_fck", &ssi_ssr_sst_fck, CK_243X | CK_242X), @@ -206,7 +206,7 @@ static struct omap_clk omap24xx_clks[] = { CLK(NULL, "aes_ick", &aes_ick, CK_243X | CK_242X), CLK(NULL, "pka_ick", &pka_ick, CK_243X | CK_242X), CLK(NULL, "usb_fck", &usb_fck, CK_243X | CK_242X), - CLK(NULL, "usbhs_ick", &usbhs_ick, CK_243X), + CLK("musb_hdrc", "ick", &usbhs_ick, CK_243X), CLK("mmci-omap-hs.0", "ick", &mmchs1_ick, CK_243X), CLK("mmci-omap-hs.0", "fck", &mmchs1_fck, CK_243X), CLK("mmci-omap-hs.1", "ick", &mmchs2_ick, CK_243X), diff --git a/arch/arm/mach-omap2/clock34xx.c b/arch/arm/mach-omap2/clock34xx.c index 0a14dca31e3..ba05aa42bd8 100644 --- a/arch/arm/mach-omap2/clock34xx.c +++ b/arch/arm/mach-omap2/clock34xx.c @@ -157,7 +157,7 @@ static struct omap_clk omap34xx_clks[] = { CLK(NULL, "ssi_ssr_fck", &ssi_ssr_fck, CK_343X), CLK(NULL, "ssi_sst_fck", &ssi_sst_fck, CK_343X), CLK(NULL, "core_l3_ick", &core_l3_ick, CK_343X), - CLK(NULL, "hsotgusb_ick", &hsotgusb_ick, CK_343X), + CLK("musb_hdrc", "ick", &hsotgusb_ick, CK_343X), CLK(NULL, "sdrc_ick", &sdrc_ick, CK_343X), CLK(NULL, "gpmc_fck", &gpmc_fck, CK_343X), CLK(NULL, "security_l3_ick", &security_l3_ick, CK_343X), @@ -197,11 +197,11 @@ static struct omap_clk omap34xx_clks[] = { CLK("omap_rng", "ick", &rng_ick, CK_343X), CLK(NULL, "sha11_ick", &sha11_ick, CK_343X), CLK(NULL, "des1_ick", &des1_ick, CK_343X), - CLK(NULL, "dss1_alwon_fck", &dss1_alwon_fck, CK_343X), - CLK(NULL, "dss_tv_fck", &dss_tv_fck, CK_343X), - CLK(NULL, "dss_96m_fck", &dss_96m_fck, CK_343X), - CLK(NULL, "dss2_alwon_fck", &dss2_alwon_fck, CK_343X), - CLK(NULL, "dss_ick", &dss_ick, 
CK_343X), + CLK("omapfb", "dss1_fck", &dss1_alwon_fck, CK_343X), + CLK("omapfb", "tv_fck", &dss_tv_fck, CK_343X), + CLK("omapfb", "video_fck", &dss_96m_fck, CK_343X), + CLK("omapfb", "dss2_fck", &dss2_alwon_fck, CK_343X), + CLK("omapfb", "ick", &dss_ick, CK_343X), CLK(NULL, "cam_mclk", &cam_mclk, CK_343X), CLK(NULL, "cam_ick", &cam_ick, CK_343X), CLK(NULL, "csi2_96m_fck", &csi2_96m_fck, CK_343X), diff --git a/arch/arm/mach-omap2/clock34xx.h b/arch/arm/mach-omap2/clock34xx.h index 6763b8f7302..017a30e9aa1 100644 --- a/arch/arm/mach-omap2/clock34xx.h +++ b/arch/arm/mach-omap2/clock34xx.h @@ -2182,7 +2182,7 @@ static struct clk wkup_32k_fck = { static struct clk gpio1_dbck = { .name = "gpio1_dbck", - .ops = &clkops_omap2_dflt_wait, + .ops = &clkops_omap2_dflt, .parent = &wkup_32k_fck, .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN), .enable_bit = OMAP3430_EN_GPIO1_SHIFT, @@ -2427,7 +2427,7 @@ static struct clk per_32k_alwon_fck = { static struct clk gpio6_dbck = { .name = "gpio6_dbck", - .ops = &clkops_omap2_dflt_wait, + .ops = &clkops_omap2_dflt, .parent = &per_32k_alwon_fck, .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN), .enable_bit = OMAP3430_EN_GPIO6_SHIFT, @@ -2437,7 +2437,7 @@ static struct clk gpio6_dbck = { static struct clk gpio5_dbck = { .name = "gpio5_dbck", - .ops = &clkops_omap2_dflt_wait, + .ops = &clkops_omap2_dflt, .parent = &per_32k_alwon_fck, .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN), .enable_bit = OMAP3430_EN_GPIO5_SHIFT, @@ -2447,7 +2447,7 @@ static struct clk gpio5_dbck = { static struct clk gpio4_dbck = { .name = "gpio4_dbck", - .ops = &clkops_omap2_dflt_wait, + .ops = &clkops_omap2_dflt, .parent = &per_32k_alwon_fck, .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN), .enable_bit = OMAP3430_EN_GPIO4_SHIFT, @@ -2457,7 +2457,7 @@ static struct clk gpio4_dbck = { static struct clk gpio3_dbck = { .name = "gpio3_dbck", - .ops = &clkops_omap2_dflt_wait, + .ops = &clkops_omap2_dflt, .parent = &per_32k_alwon_fck, .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN), .enable_bit = OMAP3430_EN_GPIO3_SHIFT, @@ -2467,7 +2467,7 @@ static struct clk gpio3_dbck = { static struct clk gpio2_dbck = { .name = "gpio2_dbck", - .ops = &clkops_omap2_dflt_wait, + .ops = &clkops_omap2_dflt, .parent = &per_32k_alwon_fck, .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN), .enable_bit = OMAP3430_EN_GPIO2_SHIFT, diff --git a/arch/arm/mach-omap2/devices.c b/arch/arm/mach-omap2/devices.c index 496983ade97..894cc355818 100644 --- a/arch/arm/mach-omap2/devices.c +++ b/arch/arm/mach-omap2/devices.c @@ -354,10 +354,12 @@ static void omap_init_mcspi(void) platform_device_register(&omap2_mcspi1); platform_device_register(&omap2_mcspi2); #if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3) - platform_device_register(&omap2_mcspi3); + if (cpu_is_omap2430() || cpu_is_omap343x()) + platform_device_register(&omap2_mcspi3); #endif #ifdef CONFIG_ARCH_OMAP3 - platform_device_register(&omap2_mcspi4); + if (cpu_is_omap343x()) + platform_device_register(&omap2_mcspi4); #endif } diff --git a/arch/arm/mach-omap2/prm-regbits-34xx.h b/arch/arm/mach-omap2/prm-regbits-34xx.h index c6a7940f428..9fd03a2ec95 100644 --- a/arch/arm/mach-omap2/prm-regbits-34xx.h +++ b/arch/arm/mach-omap2/prm-regbits-34xx.h @@ -409,7 +409,7 @@ /* PM_PREPWSTST_CAM specific bits */ /* PM_PWSTCTRL_USBHOST specific bits */ -#define OMAP3430ES2_SAVEANDRESTORE_SHIFT (1 << 4) +#define OMAP3430ES2_SAVEANDRESTORE_SHIFT 4 /* RM_RSTST_PER specific bits */ diff --git 
a/arch/arm/mach-omap2/usb-tusb6010.c b/arch/arm/mach-omap2/usb-tusb6010.c index 8df55f40f4c..8622c24cd27 100644 --- a/arch/arm/mach-omap2/usb-tusb6010.c +++ b/arch/arm/mach-omap2/usb-tusb6010.c @@ -187,7 +187,7 @@ int tusb6010_platform_retime(unsigned is_refclk) unsigned sysclk_ps; int status; - if (!refclk_psec || sysclk_ps == 0) + if (!refclk_psec || fclk_ps == 0) return -ENODEV; sysclk_ps = is_refclk ? refclk_psec : TUSB6010_OSCCLK_60; diff --git a/arch/arm/mach-orion5x/common.c b/arch/arm/mach-orion5x/common.c index 6af99ddabdf..b1c7778d9f9 100644 --- a/arch/arm/mach-orion5x/common.c +++ b/arch/arm/mach-orion5x/common.c @@ -188,6 +188,9 @@ static struct platform_device orion5x_eth = { .id = 0, .num_resources = 1, .resource = orion5x_eth_resources, + .dev = { + .coherent_dma_mask = 0xffffffff, + }, }; void __init orion5x_eth_init(struct mv643xx_eth_platform_data *eth_data) @@ -248,12 +251,10 @@ static struct mv64xxx_i2c_pdata orion5x_i2c_pdata = { static struct resource orion5x_i2c_resources[] = { { - .name = "i2c base", .start = I2C_PHYS_BASE, .end = I2C_PHYS_BASE + 0x1f, .flags = IORESOURCE_MEM, }, { - .name = "i2c irq", .start = IRQ_ORION5X_I2C, .end = IRQ_ORION5X_I2C, .flags = IORESOURCE_IRQ, diff --git a/arch/arm/mach-pxa/devices.c b/arch/arm/mach-pxa/devices.c index d245e59c51b..29970f703f3 100644 --- a/arch/arm/mach-pxa/devices.c +++ b/arch/arm/mach-pxa/devices.c @@ -72,7 +72,10 @@ void __init pxa_set_mci_info(struct pxamci_platform_data *info) } -static struct pxa2xx_udc_mach_info pxa_udc_info; +static struct pxa2xx_udc_mach_info pxa_udc_info = { + .gpio_pullup = -1, + .gpio_vbus = -1, +}; void __init pxa_set_udc_info(struct pxa2xx_udc_mach_info *info) { diff --git a/arch/arm/mach-pxa/ezx.c b/arch/arm/mach-pxa/ezx.c index 92ba16e1b6f..7db966dc29c 100644 --- a/arch/arm/mach-pxa/ezx.c +++ b/arch/arm/mach-pxa/ezx.c @@ -111,9 +111,9 @@ static unsigned long ezx_pin_config[] __initdata = { GPIO25_SSP1_TXD, GPIO26_SSP1_RXD, GPIO24_GPIO, /* pcap chip select */ - GPIO1_GPIO, /* pcap interrupt */ - GPIO4_GPIO, /* WDI_AP */ - GPIO55_GPIO, /* SYS_RESTART */ + GPIO1_GPIO | WAKEUP_ON_EDGE_RISE, /* pcap interrupt */ + GPIO4_GPIO | MFP_LPM_DRIVE_HIGH, /* WDI_AP */ + GPIO55_GPIO | MFP_LPM_DRIVE_HIGH, /* SYS_RESTART */ /* MMC */ GPIO32_MMC_CLK, @@ -144,20 +144,20 @@ static unsigned long ezx_pin_config[] __initdata = { #if defined(CONFIG_MACH_EZX_A780) || defined(CONFIG_MACH_EZX_E680) static unsigned long gen1_pin_config[] __initdata = { /* flip / lockswitch */ - GPIO12_GPIO, + GPIO12_GPIO | WAKEUP_ON_EDGE_BOTH, /* bluetooth (bcm2035) */ - GPIO14_GPIO | WAKEUP_ON_LEVEL_HIGH, /* HOSTWAKE */ + GPIO14_GPIO | WAKEUP_ON_EDGE_RISE, /* HOSTWAKE */ GPIO48_GPIO, /* RESET */ GPIO28_GPIO, /* WAKEUP */ /* Neptune handshake */ - GPIO0_GPIO | WAKEUP_ON_LEVEL_HIGH, /* BP_RDY */ - GPIO57_GPIO, /* AP_RDY */ - GPIO13_GPIO | WAKEUP_ON_LEVEL_HIGH, /* WDI */ - GPIO3_GPIO | WAKEUP_ON_LEVEL_HIGH, /* WDI2 */ - GPIO82_GPIO, /* RESET */ - GPIO99_GPIO, /* TC_MM_EN */ + GPIO0_GPIO | WAKEUP_ON_EDGE_FALL, /* BP_RDY */ + GPIO57_GPIO | MFP_LPM_DRIVE_HIGH, /* AP_RDY */ + GPIO13_GPIO | WAKEUP_ON_EDGE_BOTH, /* WDI */ + GPIO3_GPIO | WAKEUP_ON_EDGE_BOTH, /* WDI2 */ + GPIO82_GPIO | MFP_LPM_DRIVE_HIGH, /* RESET */ + GPIO99_GPIO | MFP_LPM_DRIVE_HIGH, /* TC_MM_EN */ /* sound */ GPIO52_SSP3_SCLK, @@ -199,21 +199,21 @@ static unsigned long gen1_pin_config[] __initdata = { defined(CONFIG_MACH_EZX_E2) || defined(CONFIG_MACH_EZX_E6) static unsigned long gen2_pin_config[] __initdata = { /* flip / lockswitch */ - GPIO15_GPIO, + GPIO15_GPIO | 
WAKEUP_ON_EDGE_BOTH, /* EOC */ - GPIO10_GPIO, + GPIO10_GPIO | WAKEUP_ON_EDGE_RISE, /* bluetooth (bcm2045) */ - GPIO13_GPIO | WAKEUP_ON_LEVEL_HIGH, /* HOSTWAKE */ + GPIO13_GPIO | WAKEUP_ON_EDGE_RISE, /* HOSTWAKE */ GPIO37_GPIO, /* RESET */ GPIO57_GPIO, /* WAKEUP */ /* Neptune handshake */ - GPIO0_GPIO | WAKEUP_ON_LEVEL_HIGH, /* BP_RDY */ - GPIO96_GPIO, /* AP_RDY */ - GPIO3_GPIO | WAKEUP_ON_LEVEL_HIGH, /* WDI */ - GPIO116_GPIO, /* RESET */ + GPIO0_GPIO | WAKEUP_ON_EDGE_FALL, /* BP_RDY */ + GPIO96_GPIO | MFP_LPM_DRIVE_HIGH, /* AP_RDY */ + GPIO3_GPIO | WAKEUP_ON_EDGE_FALL, /* WDI */ + GPIO116_GPIO | MFP_LPM_DRIVE_HIGH, /* RESET */ GPIO41_GPIO, /* BP_FLASH */ /* sound */ diff --git a/arch/arm/mach-pxa/imote2.c b/arch/arm/mach-pxa/imote2.c index 2121309b247..2b27336c29f 100644 --- a/arch/arm/mach-pxa/imote2.c +++ b/arch/arm/mach-pxa/imote2.c @@ -412,7 +412,7 @@ static struct platform_device imote2_flash_device = { */ static struct i2c_board_info __initdata imote2_i2c_board_info[] = { { /* UCAM sensor board */ - .type = "max1238", + .type = "max1239", .addr = 0x35, }, { /* ITS400 Sensor board only */ .type = "max1363", diff --git a/arch/arm/mach-pxa/include/mach/reset.h b/arch/arm/mach-pxa/include/mach/reset.h index 31e6a7b6ad8..b6c10556fbc 100644 --- a/arch/arm/mach-pxa/include/mach/reset.h +++ b/arch/arm/mach-pxa/include/mach/reset.h @@ -13,8 +13,9 @@ extern void clear_reset_status(unsigned int mask); /** * init_gpio_reset() - register GPIO as reset generator * @gpio: gpio nr - * @output: set gpio as out/low instead of input during normal work + * @output: set gpio as output instead of input during normal work + * @level: output level */ -extern int init_gpio_reset(int gpio, int output); +extern int init_gpio_reset(int gpio, int output, int level); #endif /* __ASM_ARCH_RESET_H */ diff --git a/arch/arm/mach-pxa/mfp-pxa2xx.c b/arch/arm/mach-pxa/mfp-pxa2xx.c index 7ffb91d64c3..cf6b720c055 100644 --- a/arch/arm/mach-pxa/mfp-pxa2xx.c +++ b/arch/arm/mach-pxa/mfp-pxa2xx.c @@ -322,6 +322,7 @@ static inline void pxa27x_mfp_init(void) {} #ifdef CONFIG_PM static unsigned long saved_gafr[2][4]; static unsigned long saved_gpdr[4]; +static unsigned long saved_pgsr[4]; static int pxa2xx_mfp_suspend(struct sys_device *d, pm_message_t state) { @@ -332,6 +333,7 @@ static int pxa2xx_mfp_suspend(struct sys_device *d, pm_message_t state) saved_gafr[0][i] = GAFR_L(i); saved_gafr[1][i] = GAFR_U(i); saved_gpdr[i] = GPDR(i * 32); + saved_pgsr[i] = PGSR(i); GPDR(i * 32) = gpdr_lpm[i]; } @@ -346,6 +348,7 @@ static int pxa2xx_mfp_resume(struct sys_device *d) GAFR_L(i) = saved_gafr[0][i]; GAFR_U(i) = saved_gafr[1][i]; GPDR(i * 32) = saved_gpdr[i]; + PGSR(i) = saved_pgsr[i]; } PSSR = PSSR_RDH | PSSR_PH; return 0; @@ -374,6 +377,9 @@ static int __init pxa2xx_mfp_init(void) if (cpu_is_pxa27x()) pxa27x_mfp_init(); + /* clear RDH bit to enable GPIO receivers after reset/sleep exit */ + PSSR = PSSR_RDH; + /* initialize gafr_run[], pgsr_lpm[] from existing values */ for (i = 0; i <= gpio_to_bank(pxa_last_gpio); i++) gpdr_lpm[i] = GPDR(i * 32); diff --git a/arch/arm/mach-pxa/palmld.c b/arch/arm/mach-pxa/palmld.c index 1cec1806f00..471a853e548 100644 --- a/arch/arm/mach-pxa/palmld.c +++ b/arch/arm/mach-pxa/palmld.c @@ -62,6 +62,8 @@ static unsigned long palmld_pin_config[] __initdata = { GPIO29_AC97_SDATA_IN_0, GPIO30_AC97_SDATA_OUT, GPIO31_AC97_SYNC, + GPIO89_AC97_SYSCLK, + GPIO95_AC97_nRESET, /* IrDA */ GPIO108_GPIO, /* ir disable */ diff --git a/arch/arm/mach-pxa/palmt5.c b/arch/arm/mach-pxa/palmt5.c index 
30662363907..05bf979b78a 100644 --- a/arch/arm/mach-pxa/palmt5.c +++ b/arch/arm/mach-pxa/palmt5.c @@ -64,6 +64,7 @@ static unsigned long palmt5_pin_config[] __initdata = { GPIO29_AC97_SDATA_IN_0, GPIO30_AC97_SDATA_OUT, GPIO31_AC97_SYNC, + GPIO89_AC97_SYSCLK, GPIO95_AC97_nRESET, /* IrDA */ diff --git a/arch/arm/mach-pxa/palmtx.c b/arch/arm/mach-pxa/palmtx.c index e2d44b1a8a9..e99a893c58a 100644 --- a/arch/arm/mach-pxa/palmtx.c +++ b/arch/arm/mach-pxa/palmtx.c @@ -65,6 +65,7 @@ static unsigned long palmtx_pin_config[] __initdata = { GPIO29_AC97_SDATA_IN_0, GPIO30_AC97_SDATA_OUT, GPIO31_AC97_SYNC, + GPIO89_AC97_SYSCLK, GPIO95_AC97_nRESET, /* IrDA */ diff --git a/arch/arm/mach-pxa/reset.c b/arch/arm/mach-pxa/reset.c index df29d45fb4e..01e9d643394 100644 --- a/arch/arm/mach-pxa/reset.c +++ b/arch/arm/mach-pxa/reset.c @@ -20,7 +20,7 @@ static void do_hw_reset(void); static int reset_gpio = -1; -int init_gpio_reset(int gpio, int output) +int init_gpio_reset(int gpio, int output, int level) { int rc; @@ -31,7 +31,7 @@ int init_gpio_reset(int gpio, int output) } if (output) - rc = gpio_direction_output(gpio, 0); + rc = gpio_direction_output(gpio, level); else rc = gpio_direction_input(gpio); if (rc) { diff --git a/arch/arm/mach-pxa/spitz.c b/arch/arm/mach-pxa/spitz.c index c18e34acafc..5a45fe340a1 100644 --- a/arch/arm/mach-pxa/spitz.c +++ b/arch/arm/mach-pxa/spitz.c @@ -531,9 +531,15 @@ static int spitz_ohci_init(struct device *dev) return gpio_direction_output(SPITZ_GPIO_USB_HOST, 1); } +static void spitz_ohci_exit(struct device *dev) +{ + gpio_free(SPITZ_GPIO_USB_HOST); +} + static struct pxaohci_platform_data spitz_ohci_platform_data = { .port_mode = PMM_NPS_MODE, .init = spitz_ohci_init, + .exit = spitz_ohci_exit, .flags = ENABLE_PORT_ALL | NO_OC_PROTECTION, .power_budget = 150, }; @@ -731,7 +737,7 @@ static void spitz_restart(char mode, const char *cmd) static void __init common_init(void) { - init_gpio_reset(SPITZ_GPIO_ON_RESET, 1); + init_gpio_reset(SPITZ_GPIO_ON_RESET, 1, 0); pm_power_off = spitz_poweroff; arm_pm_restart = spitz_restart; diff --git a/arch/arm/mach-pxa/tosa.c b/arch/arm/mach-pxa/tosa.c index afac5b6d3d7..a0bd46ef5d3 100644 --- a/arch/arm/mach-pxa/tosa.c +++ b/arch/arm/mach-pxa/tosa.c @@ -897,7 +897,7 @@ static void __init tosa_init(void) gpio_set_wake(MFP_PIN_GPIO1, 1); /* We can't pass to gpio-keys since it will drop the Reset altfunc */ - init_gpio_reset(TOSA_GPIO_ON_RESET, 0); + init_gpio_reset(TOSA_GPIO_ON_RESET, 0, 0); pm_power_off = tosa_poweroff; arm_pm_restart = tosa_restart; diff --git a/arch/arm/mach-pxa/viper.c b/arch/arm/mach-pxa/viper.c index 0e65344e9f5..dd031cc4184 100644 --- a/arch/arm/mach-pxa/viper.c +++ b/arch/arm/mach-pxa/viper.c @@ -46,6 +46,7 @@ #include #include #include +#include #include #include diff --git a/arch/arm/mach-realview/core.c b/arch/arm/mach-realview/core.c index 942e1a7eb9b..076acbc5070 100644 --- a/arch/arm/mach-realview/core.c +++ b/arch/arm/mach-realview/core.c @@ -750,14 +750,6 @@ void __init realview_timer_init(unsigned int timer_irq) { u32 val; -#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST - /* - * The dummy clock device has to be registered before the main device - * so that the latter will broadcast the clock events - */ - local_timer_setup(); -#endif - /* * set clock frequency: * REALVIEW_REFCLK is 32KHz diff --git a/arch/arm/mach-realview/include/mach/smp.h b/arch/arm/mach-realview/include/mach/smp.h index 515819efd04..dd53892d44a 100644 --- a/arch/arm/mach-realview/include/mach/smp.h +++ 
b/arch/arm/mach-realview/include/mach/smp.h @@ -15,16 +15,9 @@ /* * We use IRQ1 as the IPI */ -static inline void smp_cross_call(cpumask_t callmap) -{ - gic_raise_softirq(callmap, 1); -} - -/* - * Do nothing on MPcore. - */ -static inline void smp_cross_call_done(cpumask_t callmap) +static inline void smp_cross_call(const struct cpumask *mask) { + gic_raise_softirq(mask, 1); } #endif diff --git a/arch/arm/mach-realview/localtimer.c b/arch/arm/mach-realview/localtimer.c index d0d39adf640..1c01d13460f 100644 --- a/arch/arm/mach-realview/localtimer.c +++ b/arch/arm/mach-realview/localtimer.c @@ -189,8 +189,10 @@ void __cpuinit local_timer_setup(void) struct clock_event_device *clk = &per_cpu(local_clockevent, cpu); clk->name = "dummy_timer"; - clk->features = CLOCK_EVT_FEAT_DUMMY; - clk->rating = 200; + clk->features = CLOCK_EVT_FEAT_ONESHOT | + CLOCK_EVT_FEAT_PERIODIC | + CLOCK_EVT_FEAT_DUMMY; + clk->rating = 400; clk->mult = 1; clk->set_mode = dummy_timer_set_mode; clk->broadcast = smp_timer_broadcast; diff --git a/arch/arm/mach-realview/platsmp.c b/arch/arm/mach-realview/platsmp.c index ea3c75595fa..30a9c68591f 100644 --- a/arch/arm/mach-realview/platsmp.c +++ b/arch/arm/mach-realview/platsmp.c @@ -77,13 +77,6 @@ void __cpuinit platform_secondary_init(unsigned int cpu) { trace_hardirqs_off(); - /* - * the primary core may have used a "cross call" soft interrupt - * to get this processor out of WFI in the BootMonitor - make - * sure that we are no longer being sent this soft interrupt - */ - smp_cross_call_done(cpumask_of_cpu(cpu)); - /* * if any interrupts are already enabled for the primary * core (e.g. timer irq), then they will not have been enabled @@ -136,7 +129,7 @@ int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle) * Use smp_cross_call() for this, since there's little * point duplicating the code here */ - smp_cross_call(cpumask_of_cpu(cpu)); + smp_cross_call(cpumask_of(cpu)); timeout = jiffies + (1 * HZ); while (time_before(jiffies, timeout)) { @@ -224,11 +217,9 @@ void __init smp_prepare_cpus(unsigned int max_cpus) if (max_cpus > ncores) max_cpus = ncores; -#ifdef CONFIG_LOCAL_TIMERS +#if defined(CONFIG_LOCAL_TIMERS) || defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) /* - * Enable the local timer for primary CPU. If the device is - * dummy (!CONFIG_LOCAL_TIMERS), it was already registers in - * realview_timer_init + * Enable the local timer or broadcast device for the boot CPU. 
*/ local_timer_setup(); #endif diff --git a/arch/arm/mach-s3c2410/mach-bast.c b/arch/arm/mach-s3c2410/mach-bast.c index 4389c160f7d..8637dea5e15 100644 --- a/arch/arm/mach-s3c2410/mach-bast.c +++ b/arch/arm/mach-s3c2410/mach-bast.c @@ -588,8 +588,6 @@ static void __init bast_map_io(void) s3c_device_nand.dev.platform_data = &bast_nand_info; - s3c_i2c0_set_platdata(&bast_i2c_info); - s3c24xx_init_io(bast_iodesc, ARRAY_SIZE(bast_iodesc)); s3c24xx_init_clocks(0); s3c24xx_init_uarts(bast_uartcfgs, ARRAY_SIZE(bast_uartcfgs)); @@ -602,6 +600,7 @@ static void __init bast_init(void) sysdev_class_register(&bast_pm_sysclass); sysdev_register(&bast_pm_sysdev); + s3c_i2c0_set_platdata(&bast_i2c_info); s3c24xx_fb_set_platdata(&bast_fb_info); platform_add_devices(bast_devices, ARRAY_SIZE(bast_devices)); diff --git a/arch/arm/mach-versatile/core.c b/arch/arm/mach-versatile/core.c index 1f929c391af..b3bebcc5623 100644 --- a/arch/arm/mach-versatile/core.c +++ b/arch/arm/mach-versatile/core.c @@ -413,7 +413,7 @@ static struct clk ref24_clk = { .rate = 24000000, }; -static struct clk_lookup lookups[] __initdata = { +static struct clk_lookup lookups[] = { { /* UART0 */ .dev_id = "dev:f1", .clk = &ref24_clk, diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index 3397f1e64d7..a08d9d2380d 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S @@ -184,23 +184,37 @@ __v7_setup: stmia r12, {r0-r5, r7, r9, r11, lr} bl v7_flush_dcache_all ldmia r12, {r0-r5, r7, r9, r11, lr} + + mrc p15, 0, r0, c0, c0, 0 @ read main ID register + and r10, r0, #0xff000000 @ ARM? + teq r10, #0x41000000 + bne 2f + and r5, r0, #0x00f00000 @ variant + and r6, r0, #0x0000000f @ revision + orr r0, r6, r5, lsr #20-4 @ combine variant and revision + #ifdef CONFIG_ARM_ERRATA_430973 - mrc p15, 0, r10, c1, c0, 1 @ read aux control register - orr r10, r10, #(1 << 6) @ set IBE to 1 - mcr p15, 0, r10, c1, c0, 1 @ write aux control register + teq r5, #0x00100000 @ only present in r1p* + mrceq p15, 0, r10, c1, c0, 1 @ read aux control register + orreq r10, r10, #(1 << 6) @ set IBE to 1 + mcreq p15, 0, r10, c1, c0, 1 @ write aux control register #endif #ifdef CONFIG_ARM_ERRATA_458693 - mrc p15, 0, r10, c1, c0, 1 @ read aux control register - orr r10, r10, #(1 << 5) @ set L1NEON to 1 - orr r10, r10, #(1 << 9) @ set PLDNOP to 1 - mcr p15, 0, r10, c1, c0, 1 @ write aux control register + teq r0, #0x20 @ only present in r2p0 + mrceq p15, 0, r10, c1, c0, 1 @ read aux control register + orreq r10, r10, #(1 << 5) @ set L1NEON to 1 + orreq r10, r10, #(1 << 9) @ set PLDNOP to 1 + mcreq p15, 0, r10, c1, c0, 1 @ write aux control register #endif #ifdef CONFIG_ARM_ERRATA_460075 - mrc p15, 1, r10, c9, c0, 2 @ read L2 cache aux ctrl register - orr r10, r10, #(1 << 22) @ set the Write Allocate disable bit - mcr p15, 1, r10, c9, c0, 2 @ write the L2 cache aux ctrl register + teq r0, #0x20 @ only present in r2p0 + mrceq p15, 1, r10, c9, c0, 2 @ read L2 cache aux ctrl register + tsteq r10, #1 << 22 + orreq r10, r10, #(1 << 22) @ set the Write Allocate disable bit + mcreq p15, 1, r10, c9, c0, 2 @ write the L2 cache aux ctrl register #endif - mov r10, #0 + +2: mov r10, #0 #ifdef HARVARD_CACHE mcr p15, 0, r10, c7, c5, 0 @ I+BTB cache invalidate #endif diff --git a/arch/arm/nwfpe/fpa11.h b/arch/arm/nwfpe/fpa11.h index 386cbd13eaf..d3a6f9298e9 100644 --- a/arch/arm/nwfpe/fpa11.h +++ b/arch/arm/nwfpe/fpa11.h @@ -114,4 +114,8 @@ extern unsigned int SingleCPDO(struct roundingData *roundData, extern unsigned int DoubleCPDO(struct roundingData *roundData, const 
unsigned int opcode, FPREG * rFd); +/* extended_cpdo.c */ +extern unsigned int ExtendedCPDO(struct roundingData *roundData, + const unsigned int opcode, FPREG * rFd); + #endif diff --git a/arch/arm/nwfpe/fpa11_cprt.c b/arch/arm/nwfpe/fpa11_cprt.c index 9843dc53304..31c4eeec18b 100644 --- a/arch/arm/nwfpe/fpa11_cprt.c +++ b/arch/arm/nwfpe/fpa11_cprt.c @@ -27,10 +27,6 @@ #include "fpmodule.inl" #include "softfloat.h" -#ifdef CONFIG_FPE_NWFPE_XP -extern flag floatx80_is_nan(floatx80); -#endif - unsigned int PerformFLT(const unsigned int opcode); unsigned int PerformFIX(const unsigned int opcode); diff --git a/arch/arm/nwfpe/softfloat.h b/arch/arm/nwfpe/softfloat.h index 260fe29d73f..13e479c5da5 100644 --- a/arch/arm/nwfpe/softfloat.h +++ b/arch/arm/nwfpe/softfloat.h @@ -226,6 +226,8 @@ char floatx80_le_quiet( floatx80, floatx80 ); char floatx80_lt_quiet( floatx80, floatx80 ); char floatx80_is_signaling_nan( floatx80 ); +extern flag floatx80_is_nan(floatx80); + #endif static inline flag extractFloat32Sign(float32 a) diff --git a/arch/arm/plat-omap/fb.c b/arch/arm/plat-omap/fb.c index ce6b4baeede..3746222bed1 100644 --- a/arch/arm/plat-omap/fb.c +++ b/arch/arm/plat-omap/fb.c @@ -206,9 +206,10 @@ void __init omapfb_reserve_sdram(void) config_invalid = 1; return; } - if (rg.paddr) + if (rg.paddr) { reserve_bootmem(rg.paddr, rg.size, BOOTMEM_DEFAULT); - reserved += rg.size; + reserved += rg.size; + } omapfb_config.mem_desc.region[i] = rg; configured_regions++; } diff --git a/arch/arm/plat-omap/gpio.c b/arch/arm/plat-omap/gpio.c index 17d7afe42b8..ee0b21f5b09 100644 --- a/arch/arm/plat-omap/gpio.c +++ b/arch/arm/plat-omap/gpio.c @@ -307,7 +307,7 @@ static inline int gpio_valid(int gpio) return 0; if (cpu_is_omap24xx() && gpio < 128) return 0; - if (cpu_is_omap34xx() && gpio < 160) + if (cpu_is_omap34xx() && gpio < 192) return 0; return -1; } diff --git a/arch/arm/plat-s3c/clock.c b/arch/arm/plat-s3c/clock.c index b6be76e2fe5..4d01ef1a25d 100644 --- a/arch/arm/plat-s3c/clock.c +++ b/arch/arm/plat-s3c/clock.c @@ -306,8 +306,6 @@ struct clk s3c24xx_uclk = { int s3c24xx_register_clock(struct clk *clk) { - clk->owner = THIS_MODULE; - if (clk->enable == NULL) clk->enable = clk_null_enable; diff --git a/arch/arm/plat-s3c24xx/dma.c b/arch/arm/plat-s3c24xx/dma.c index aee2aeb46c6..07326f63236 100644 --- a/arch/arm/plat-s3c24xx/dma.c +++ b/arch/arm/plat-s3c24xx/dma.c @@ -1235,7 +1235,7 @@ int s3c2410_dma_getposition(unsigned int channel, dma_addr_t *src, dma_addr_t *d EXPORT_SYMBOL(s3c2410_dma_getposition); -static struct s3c2410_dma_chan *to_dma_chan(struct sys_device *dev) +static inline struct s3c2410_dma_chan *to_dma_chan(struct sys_device *dev) { return container_of(dev, struct s3c2410_dma_chan, dev); } diff --git a/arch/arm/plat-s3c64xx/gpiolib.c b/arch/arm/plat-s3c64xx/gpiolib.c index ee9188add8f..78ee52cffc9 100644 --- a/arch/arm/plat-s3c64xx/gpiolib.c +++ b/arch/arm/plat-s3c64xx/gpiolib.c @@ -57,7 +57,7 @@ #if 1 #define gpio_dbg(x...) do { } while(0) #else -#define gpio_dbg(x...) printk(KERN_DEBUG ## x) +#define gpio_dbg(x...)
printk(KERN_DEBUG x) #endif /* The s3c64xx_gpiolib_4bit routines are to control the gpio banks where diff --git a/arch/arm/plat-s3c64xx/include/plat/gpio-bank-h.h b/arch/arm/plat-s3c64xx/include/plat/gpio-bank-h.h index 81549516572..2ba1767512d 100644 --- a/arch/arm/plat-s3c64xx/include/plat/gpio-bank-h.h +++ b/arch/arm/plat-s3c64xx/include/plat/gpio-bank-h.h @@ -61,14 +61,14 @@ #define S3C64XX_GPH7_ADDR_CF1 (0x06 << 28) #define S3C64XX_GPH7_EINT_G6_7 (0x07 << 28) -#define S3C64XX_GPH8_MMC1_DATA6 (0x02 << 32) -#define S3C64XX_GPH8_MMC2_DATA2 (0x03 << 32) -#define S3C64XX_GPH8_I2S_V40_LRCLK (0x05 << 32) -#define S3C64XX_GPH8_ADDR_CF2 (0x06 << 32) -#define S3C64XX_GPH8_EINT_G6_8 (0x07 << 32) - -#define S3C64XX_GPH9_MMC1_DATA7 (0x02 << 36) -#define S3C64XX_GPH9_MMC2_DATA3 (0x03 << 36) -#define S3C64XX_GPH9_I2S_V40_DI (0x05 << 36) -#define S3C64XX_GPH9_EINT_G6_9 (0x07 << 36) +#define S3C64XX_GPH8_MMC1_DATA6 (0x02 << 0) +#define S3C64XX_GPH8_MMC2_DATA2 (0x03 << 0) +#define S3C64XX_GPH8_I2S_V40_LRCLK (0x05 << 0) +#define S3C64XX_GPH8_ADDR_CF2 (0x06 << 0) +#define S3C64XX_GPH8_EINT_G6_8 (0x07 << 0) +#define S3C64XX_GPH9_OUTPUT (0x01 << 4) +#define S3C64XX_GPH9_MMC1_DATA7 (0x02 << 4) +#define S3C64XX_GPH9_MMC2_DATA3 (0x03 << 4) +#define S3C64XX_GPH9_I2S_V40_DI (0x05 << 4) +#define S3C64XX_GPH9_EINT_G6_9 (0x07 << 4) diff --git a/arch/arm/tools/mach-types b/arch/arm/tools/mach-types index 945e0d237a1..fec64678a63 100644 --- a/arch/arm/tools/mach-types +++ b/arch/arm/tools/mach-types @@ -12,7 +12,7 @@ # # http://www.arm.linux.org.uk/developer/machines/?action=new # -# Last update: Mon Mar 23 20:09:01 2009 +# Last update: Fri May 29 10:14:20 2009 # # machine_is_xxx CONFIG_xxxx MACH_TYPE_xxx number # @@ -916,7 +916,7 @@ nxdb500 MACH_NXDB500 NXDB500 905 apf9328 MACH_APF9328 APF9328 906 omap_wipoq MACH_OMAP_WIPOQ OMAP_WIPOQ 907 omap_twip MACH_OMAP_TWIP OMAP_TWIP 908 -palmt650 MACH_PALMT650 PALMT650 909 +treo650 MACH_TREO650 TREO650 909 acumen MACH_ACUMEN ACUMEN 910 xp100 MACH_XP100 XP100 911 fs2410 MACH_FS2410 FS2410 912 @@ -1232,7 +1232,7 @@ ql202b MACH_QL202B QL202B 1226 vpac270 MACH_VPAC270 VPAC270 1227 rd129 MACH_RD129 RD129 1228 htcwizard MACH_HTCWIZARD HTCWIZARD 1229 -xscale_treo680 MACH_XSCALE_TREO680 XSCALE_TREO680 1230 +treo680 MACH_TREO680 TREO680 1230 tecon_tmezon MACH_TECON_TMEZON TECON_TMEZON 1231 zylonite MACH_ZYLONITE ZYLONITE 1233 gene1270 MACH_GENE1270 GENE1270 1234 @@ -1418,10 +1418,10 @@ looxc550 MACH_LOOXC550 LOOXC550 1417 cnty_titan MACH_CNTY_TITAN CNTY_TITAN 1418 app3xx MACH_APP3XX APP3XX 1419 sideoatsgrama MACH_SIDEOATSGRAMA SIDEOATSGRAMA 1420 -palmtreo700p MACH_PALMTREO700P PALMTREO700P 1421 -palmtreo700w MACH_PALMTREO700W PALMTREO700W 1422 -palmtreo750 MACH_PALMTREO750 PALMTREO750 1423 -palmtreo755p MACH_PALMTREO755P PALMTREO755P 1424 +treo700p MACH_TREO700P TREO700P 1421 +treo700w MACH_TREO700W TREO700W 1422 +treo750 MACH_TREO750 TREO750 1423 +treo755p MACH_TREO755P TREO755P 1424 ezreganut9200 MACH_EZREGANUT9200 EZREGANUT9200 1425 sarge MACH_SARGE SARGE 1426 a696 MACH_A696 A696 1427 @@ -1721,7 +1721,7 @@ sapphire MACH_SAPPHIRE SAPPHIRE 1729 csb637xo MACH_CSB637XO CSB637XO 1730 evisiong MACH_EVISIONG EVISIONG 1731 stmp37xx MACH_STMP37XX STMP37XX 1732 -stmp378x MACH_STMP38XX STMP38XX 1733 +stmp378x MACH_STMP378X STMP378X 1733 tnt MACH_TNT TNT 1734 tbxt MACH_TBXT TBXT 1735 playmate MACH_PLAYMATE PLAYMATE 1736 @@ -1817,7 +1817,7 @@ smdkc100 MACH_SMDKC100 SMDKC100 1826 tavorevb MACH_TAVOREVB TAVOREVB 1827 saar MACH_SAAR SAAR 1828 deister_eyecam MACH_DEISTER_EYECAM DEISTER_EYECAM 1829 
-at91sam9m10ek MACH_AT91SAM9M10EK AT91SAM9M10EK 1830 +at91sam9m10g45ek MACH_AT91SAM9M10G45EK AT91SAM9M10G45EK 1830 linkstation_produo MACH_LINKSTATION_PRODUO LINKSTATION_PRODUO 1831 hit_b0 MACH_HIT_B0 HIT_B0 1832 adx_rmu MACH_ADX_RMU ADX_RMU 1833 @@ -2132,3 +2132,116 @@ apollo MACH_APOLLO APOLLO 2141 at91cap9stk MACH_AT91CAP9STK AT91CAP9STK 2142 spc300 MACH_SPC300 SPC300 2143 eko MACH_EKO EKO 2144 +ccw9m2443 MACH_CCW9M2443 CCW9M2443 2145 +ccw9m2443js MACH_CCW9M2443JS CCW9M2443JS 2146 +m2m_router_device MACH_M2M_ROUTER_DEVICE M2M_ROUTER_DEVICE 2147 +str9104nas MACH_STAR9104NAS STAR9104NAS 2148 +pca100 MACH_PCA100 PCA100 2149 +z3_dm365_mod_01 MACH_Z3_DM365_MOD_01 Z3_DM365_MOD_01 2150 +hipox MACH_HIPOX HIPOX 2151 +omap3_piteds MACH_OMAP3_PITEDS OMAP3_PITEDS 2152 +bm150r MACH_BM150R BM150R 2153 +tbone MACH_TBONE TBONE 2154 +merlin MACH_MERLIN MERLIN 2155 +falcon MACH_FALCON FALCON 2156 +davinci_da850_evm MACH_DAVINCI_DA850_EVM DAVINCI_DA850_EVM 2157 +s5p6440 MACH_S5P6440 S5P6440 2158 +at91sam9g10ek MACH_AT91SAM9G10EK AT91SAM9G10EK 2159 +omap_4430sdp MACH_OMAP_4430SDP OMAP_4430SDP 2160 +lpc313x MACH_LPC313X LPC313X 2161 +magx_zn5 MACH_MAGX_ZN5 MAGX_ZN5 2162 +magx_em30 MACH_MAGX_EM30 MAGX_EM30 2163 +magx_ve66 MACH_MAGX_VE66 MAGX_VE66 2164 +meesc MACH_MEESC MEESC 2165 +otc570 MACH_OTC570 OTC570 2166 +bcu2412 MACH_BCU2412 BCU2412 2167 +beacon MACH_BEACON BEACON 2168 +actia_tgw MACH_ACTIA_TGW ACTIA_TGW 2169 +e4430 MACH_E4430 E4430 2170 +ql300 MACH_QL300 QL300 2171 +btmavb101 MACH_BTMAVB101 BTMAVB101 2172 +btmawb101 MACH_BTMAWB101 BTMAWB101 2173 +sq201 MACH_SQ201 SQ201 2174 +quatro45xx MACH_QUATRO45XX QUATRO45XX 2175 +openpad MACH_OPENPAD OPENPAD 2176 +tx25 MACH_TX25 TX25 2177 +omap3_torpedo MACH_OMAP3_TORPEDO OMAP3_TORPEDO 2178 +htcraphael_k MACH_HTCRAPHAEL_K HTCRAPHAEL_K 2179 +lal43 MACH_LAL43 LAL43 2181 +htcraphael_cdma500 MACH_HTCRAPHAEL_CDMA500 HTCRAPHAEL_CDMA500 2182 +anw6410 MACH_ANW6410 ANW6410 2183 +htcprophet MACH_HTCPROPHET HTCPROPHET 2185 +cfa_10022 MACH_CFA_10022 CFA_10022 2186 +imx27_visstrim_m10 MACH_IMX27_VISSTRIM_M10 IMX27_VISSTRIM_M10 2187 +px2imx27 MACH_PX2IMX27 PX2IMX27 2188 +stm3210e_eval MACH_STM3210E_EVAL STM3210E_EVAL 2189 +dvs10 MACH_DVS10 DVS10 2190 +portuxg20 MACH_PORTUXG20 PORTUXG20 2191 +arm_spv MACH_ARM_SPV ARM_SPV 2192 +smdkc110 MACH_SMDKC110 SMDKC110 2193 +cabespresso MACH_CABESPRESSO CABESPRESSO 2194 +hmc800 MACH_HMC800 HMC800 2195 +sholes MACH_SHOLES SHOLES 2196 +btmxc31 MACH_BTMXC31 BTMXC31 2197 +dt501 MACH_DT501 DT501 2198 +ktx MACH_KTX KTX 2199 +omap3517evm MACH_OMAP3517EVM OMAP3517EVM 2200 +netspace_v2 MACH_NETSPACE_V2 NETSPACE_V2 2201 +netspace_max_v2 MACH_NETSPACE_MAX_V2 NETSPACE_MAX_V2 2202 +d2net_v2 MACH_D2NET_V2 D2NET_V2 2203 +net2big_v2 MACH_NET2BIG_V2 NET2BIG_V2 2204 +net4big_v2 MACH_NET4BIG_V2 NET4BIG_V2 2205 +net5big_v2 MACH_NET5BIG_V2 NET5BIG_V2 2206 +endb2443 MACH_ENDB2443 ENDB2443 2207 +inetspace_v2 MACH_INETSPACE_V2 INETSPACE_V2 2208 +tros MACH_TROS TROS 2209 +pelco_homer MACH_PELCO_HOMER PELCO_HOMER 2210 +ofsp8 MACH_OFSP8 OFSP8 2211 +at91sam9g45ekes MACH_AT91SAM9G45EKES AT91SAM9G45EKES 2212 +guf_cupid MACH_GUF_CUPID GUF_CUPID 2213 +eab1r MACH_EAB1R EAB1R 2214 +desirec MACH_DESIREC DESIREC 2215 +cordoba MACH_CORDOBA CORDOBA 2216 +irvine MACH_IRVINE IRVINE 2217 +sff772 MACH_SFF772 SFF772 2218 +pelco_milano MACH_PELCO_MILANO PELCO_MILANO 2219 +pc7302 MACH_PC7302 PC7302 2220 +bip6000 MACH_BIP6000 BIP6000 2221 +silvermoon MACH_SILVERMOON SILVERMOON 2222 +vc0830 MACH_VC0830 VC0830 2223 +dt430 MACH_DT430 DT430 2224 +ji42pf MACH_JI42PF JI42PF 2225 
+gnet_ksm MACH_GNET_KSM GNET_KSM 2226 +gnet_sgm MACH_GNET_SGM GNET_SGM 2227 +gnet_sgr MACH_GNET_SGR GNET_SGR 2228 +omap3_icetekevm MACH_OMAP3_ICETEKEVM OMAP3_ICETEKEVM 2229 +pnp MACH_PNP PNP 2230 +ctera_2bay_k MACH_CTERA_2BAY_K CTERA_2BAY_K 2231 +ctera_2bay_u MACH_CTERA_2BAY_U CTERA_2BAY_U 2232 +sas_c MACH_SAS_C SAS_C 2233 +vma2315 MACH_VMA2315 VMA2315 2234 +vcs MACH_VCS VCS 2235 +spear600 MACH_SPEAR600 SPEAR600 2236 +spear300 MACH_SPEAR300 SPEAR300 2237 +spear1300 MACH_SPEAR1300 SPEAR1300 2238 +lilly1131 MACH_LILLY1131 LILLY1131 2239 +arvoo_ax301 MACH_ARVOO_AX301 ARVOO_AX301 2240 +mapphone MACH_MAPPHONE MAPPHONE 2241 +legend MACH_LEGEND LEGEND 2242 +salsa MACH_SALSA SALSA 2243 +lounge MACH_LOUNGE LOUNGE 2244 +vision MACH_VISION VISION 2245 +vmb20 MACH_VMB20 VMB20 2246 +hy2410 MACH_HY2410 HY2410 2247 +hy9315 MACH_HY9315 HY9315 2248 +bullwinkle MACH_BULLWINKLE BULLWINKLE 2249 +arm_ultimator2 MACH_ARM_ULTIMATOR2 ARM_ULTIMATOR2 2250 +vs_v210 MACH_VS_V210 VS_V210 2252 +vs_v212 MACH_VS_V212 VS_V212 2253 +hmt MACH_HMT HMT 2254 +suen3 MACH_SUEN3 SUEN3 2255 +vesper MACH_VESPER VESPER 2256 +str9 MACH_STR9 STR9 2257 +omap3_wl_ff MACH_OMAP3_WL_FF OMAP3_WL_FF 2258 +simcom MACH_SIMCOM SIMCOM 2259 +mcwebio MACH_MCWEBIO MCWEBIO 2260 diff --git a/arch/blackfin/include/asm/.gitignore b/arch/blackfin/include/asm/.gitignore deleted file mode 100644 index 7858564a446..00000000000 --- a/arch/blackfin/include/asm/.gitignore +++ /dev/null @@ -1 +0,0 @@ -+mach diff --git a/arch/blackfin/include/asm/flat.h b/arch/blackfin/include/asm/flat.h index e70074e05f4..733a178d782 100644 --- a/arch/blackfin/include/asm/flat.h +++ b/arch/blackfin/include/asm/flat.h @@ -10,7 +10,6 @@ #include -#define flat_stack_align(sp) /* nothing needed */ #define flat_argvp_envp_on_stack() 0 #define flat_old_ram_flag(flags) (flags) diff --git a/arch/blackfin/include/asm/unistd.h b/arch/blackfin/include/asm/unistd.h index 1e57b636e0b..cf5066d3efd 100644 --- a/arch/blackfin/include/asm/unistd.h +++ b/arch/blackfin/include/asm/unistd.h @@ -378,8 +378,10 @@ #define __NR_dup3 363 #define __NR_pipe2 364 #define __NR_inotify_init1 365 +#define __NR_preadv 366 +#define __NR_pwritev 367 -#define __NR_syscall 366 +#define __NR_syscall 368 #define NR_syscalls __NR_syscall /* Old optional stuff no one actually uses */ diff --git a/arch/blackfin/kernel/.gitignore b/arch/blackfin/kernel/.gitignore new file mode 100644 index 00000000000..c5f676c3c22 --- /dev/null +++ b/arch/blackfin/kernel/.gitignore @@ -0,0 +1 @@ +vmlinux.lds diff --git a/arch/blackfin/lib/strncmp.c b/arch/blackfin/lib/strncmp.c index 2aaae78a68e..46518b1d298 100644 --- a/arch/blackfin/lib/strncmp.c +++ b/arch/blackfin/lib/strncmp.c @@ -8,9 +8,8 @@ #define strncmp __inline_strncmp #include -#undef strncmp - #include +#undef strncmp int strncmp(const char *cs, const char *ct, size_t count) { diff --git a/arch/blackfin/mach-common/entry.S b/arch/blackfin/mach-common/entry.S index 21e65a339a2..a063a434f7e 100644 --- a/arch/blackfin/mach-common/entry.S +++ b/arch/blackfin/mach-common/entry.S @@ -1581,6 +1581,8 @@ ENTRY(_sys_call_table) .long _sys_dup3 .long _sys_pipe2 .long _sys_inotify_init1 /* 365 */ + .long _sys_preadv + .long _sys_pwritev .rept NR_syscalls-(.-_sys_call_table)/4 .long _sys_ni_syscall diff --git a/arch/cris/Makefile b/arch/cris/Makefile index 3662cfb7b61..71e17d3eedd 100644 --- a/arch/cris/Makefile +++ b/arch/cris/Makefile @@ -70,7 +70,7 @@ SRC_ARCH = $(srctree)/arch/cris # cris object files path OBJ_ARCH = $(objtree)/arch/cris -boot := arch/cris/$(SARCH)/boot +boot := 
arch/cris/boot MACHINE := arch/cris/$(SARCH) all: zImage @@ -81,15 +81,15 @@ zImage Image: vmlinux archprepare: archclean: - $(Q)if [ -e arch/cris/$(SARCH)/boot ]; then \ - $(MAKE) $(clean)=arch/cris/$(SARCH)/boot; \ + $(Q)if [ -e arch/cris/boot ]; then \ + $(MAKE) $(clean)=arch/cris/boot; \ fi CLEAN_FILES += \ - $(MACHINE)/boot/zImage \ - $(MACHINE)/boot/compressed/decompress.bin \ - $(MACHINE)/boot/compressed/piggy.gz \ - $(MACHINE)/boot/rescue/rescue.bin + $(boot)/zImage \ + $(boot)/compressed/decompress.bin \ + $(boot)/compressed/piggy.gz \ + $(boot)/rescue/rescue.bin # MRPROPER_FILES += diff --git a/arch/cris/arch-v10/boot/compressed/README b/arch/cris/arch-v10/boot/compressed/README deleted file mode 100644 index 48b3db9924b..00000000000 --- a/arch/cris/arch-v10/boot/compressed/README +++ /dev/null @@ -1,25 +0,0 @@ -Creation of the self-extracting compressed kernel image (vmlinuz) ------------------------------------------------------------------ -$Id: README,v 1.1 2001/12/17 13:59:27 bjornw Exp $ - -This can be slightly confusing because it's a process with many steps. - -The kernel object built by the arch/etrax100/Makefile, vmlinux, is split -by that makefile into text and data binary files, vmlinux.text and -vmlinux.data. - -Those files together with a ROM filesystem can be catted together and -burned into a flash or executed directly at the DRAM origin. - -They can also be catted together and compressed with gzip, which is what -happens in this makefile. Together they make up piggy.img. - -The decompressor is built into the file decompress.o. It is turned into -the binary file decompress.bin, which is catted together with piggy.img -into the file vmlinuz. It can be executed in an arbitrary place in flash. - -Be careful - it assumes some things about free locations in DRAM. It -assumes the DRAM starts at 0x40000000 and that it is at least 8 MB, -so it puts its code at 0x40700000, and initial stack at 0x40800000. - --Bjorn diff --git a/arch/cris/arch-v10/boot/compressed/misc.c b/arch/cris/arch-v10/boot/compressed/misc.c deleted file mode 100644 index a4db1507d3b..00000000000 --- a/arch/cris/arch-v10/boot/compressed/misc.c +++ /dev/null @@ -1,246 +0,0 @@ -/* - * misc.c - * - * This is a collection of several routines from gzip-1.0.3 - * adapted for Linux. - * - * malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994 - * puts by Nick Holloway 1993, better puts by Martin Mares 1995 - * adaptation for Linux/CRIS Axis Communications AB, 1999 - * - */ - -/* where the piggybacked kernel image expects itself to live. - * it is the same address we use when we network load an uncompressed - * image into DRAM, and it is the address the kernel is linked to live - * at by vmlinux.lds.S - */ - -#define KERNEL_LOAD_ADR 0x40004000 - - -#include -#include - -/* - * gzip declarations - */ - -#define OF(args) args -#define STATIC static - -void *memset(void *s, int c, size_t n); -void *memcpy(void *__dest, __const void *__src, size_t __n); - -#define memzero(s, n) memset((s), 0, (n)) - -typedef unsigned char uch; -typedef unsigned short ush; -typedef unsigned long ulg; - -#define WSIZE 0x8000 /* Window size must be at least 32k, */ - /* and a power of two */ - -static uch *inbuf; /* input buffer */ -static uch window[WSIZE]; /* Sliding window buffer */ - -unsigned inptr = 0; /* index of next byte to be processed in inbuf - * After decompression it will contain the - * compressed size, and head.S will read it. 
- */ - -static unsigned outcnt = 0; /* bytes in output buffer */ - -/* gzip flag byte */ -#define ASCII_FLAG 0x01 /* bit 0 set: file probably ascii text */ -#define CONTINUATION 0x02 /* bit 1 set: continuation of multi-part gzip file */ -#define EXTRA_FIELD 0x04 /* bit 2 set: extra field present */ -#define ORIG_NAME 0x08 /* bit 3 set: original file name present */ -#define COMMENT 0x10 /* bit 4 set: file comment present */ -#define ENCRYPTED 0x20 /* bit 5 set: file is encrypted */ -#define RESERVED 0xC0 /* bit 6,7: reserved */ - -#define get_byte() (inbuf[inptr++]) - -/* Diagnostic functions */ -#ifdef DEBUG -# define Assert(cond, msg) do { \ - if (!(cond)) \ - error(msg); \ - } while (0) -# define Trace(x) fprintf x -# define Tracev(x) do { \ - if (verbose) \ - fprintf x; \ - } while (0) -# define Tracevv(x) do { \ - if (verbose > 1) \ - fprintf x; \ - } while (0) -# define Tracec(c, x) do { \ - if (verbose && (c)) \ - fprintf x; \ - } while (0) -# define Tracecv(c, x) do { \ - if (verbose > 1 && (c)) \ - fprintf x; \ - } while (0) -#else -# define Assert(cond, msg) -# define Trace(x) -# define Tracev(x) -# define Tracevv(x) -# define Tracec(c, x) -# define Tracecv(c, x) -#endif - -static void flush_window(void); -static void error(char *m); - -extern char *input_data; /* lives in head.S */ - -static long bytes_out = 0; -static uch *output_data; -static unsigned long output_ptr = 0; -static void puts(const char *); - -/* the "heap" is put directly after the BSS ends, at end */ - -extern int _end; -static long free_mem_ptr = (long)&_end; -static long free_mem_end_ptr; - -#include "../../../../../lib/inflate.c" - -/* decompressor info and error messages to serial console */ - -static void -puts(const char *s) -{ -#ifndef CONFIG_ETRAX_DEBUG_PORT_NULL - while (*s) { -#ifdef CONFIG_ETRAX_DEBUG_PORT0 - while (!(*R_SERIAL0_STATUS & (1 << 5))) ; - *R_SERIAL0_TR_DATA = *s++; -#endif -#ifdef CONFIG_ETRAX_DEBUG_PORT1 - while (!(*R_SERIAL1_STATUS & (1 << 5))) ; - *R_SERIAL1_TR_DATA = *s++; -#endif -#ifdef CONFIG_ETRAX_DEBUG_PORT2 - while (!(*R_SERIAL2_STATUS & (1 << 5))) ; - *R_SERIAL2_TR_DATA = *s++; -#endif -#ifdef CONFIG_ETRAX_DEBUG_PORT3 - while (!(*R_SERIAL3_STATUS & (1 << 5))) ; - *R_SERIAL3_TR_DATA = *s++; -#endif - } -#endif -} - -void *memset(void *s, int c, size_t n) -{ - int i; - char *ss = (char *)s; - - for (i = 0; i < n; i++) - ss[i] = c; - - return s; -} - -void *memcpy(void *__dest, __const void *__src, size_t __n) -{ - int i; - char *d = (char *)__dest, *s = (char *)__src; - - for (i = 0; i < __n; i++) - d[i] = s[i]; - - return __dest; -} - -/* =========================================================================== - * Write the output window window[0..outcnt-1] and update crc and bytes_out. - * (Used for the decompressed data only.) 
- */ - -static void flush_window(void) -{ - ulg c = crc; /* temporary variable */ - unsigned n; - uch *in, *out, ch; - - in = window; - out = &output_data[output_ptr]; - for (n = 0; n < outcnt; n++) { - ch = *out = *in; - out++; - in++; - c = crc_32_tab[((int)c ^ ch) & 0xff] ^ (c >> 8); - } - crc = c; - bytes_out += (ulg)outcnt; - output_ptr += (ulg)outcnt; - outcnt = 0; -} - -static void error(char *x) -{ - puts("\n\n"); - puts(x); - puts("\n\n -- System halted\n"); - - while (1); /* Halt */ -} - -void setup_normal_output_buffer(void) -{ - output_data = (char *)KERNEL_LOAD_ADR; -} - -void decompress_kernel(void) -{ - char revision; - - /* input_data is set in head.S */ - inbuf = input_data; - -#ifdef CONFIG_ETRAX_DEBUG_PORT0 - *R_SERIAL0_XOFF = 0; - *R_SERIAL0_BAUD = 0x99; - *R_SERIAL0_TR_CTRL = 0x40; -#endif -#ifdef CONFIG_ETRAX_DEBUG_PORT1 - *R_SERIAL1_XOFF = 0; - *R_SERIAL1_BAUD = 0x99; - *R_SERIAL1_TR_CTRL = 0x40; -#endif -#ifdef CONFIG_ETRAX_DEBUG_PORT2 - *R_GEN_CONFIG = 0x08; - *R_SERIAL2_XOFF = 0; - *R_SERIAL2_BAUD = 0x99; - *R_SERIAL2_TR_CTRL = 0x40; -#endif -#ifdef CONFIG_ETRAX_DEBUG_PORT3 - *R_GEN_CONFIG = 0x100; - *R_SERIAL3_XOFF = 0; - *R_SERIAL3_BAUD = 0x99; - *R_SERIAL3_TR_CTRL = 0x40; -#endif - - setup_normal_output_buffer(); - - makecrc(); - - __asm__ volatile ("move $vr,%0" : "=rm" (revision)); - if (revision < 10) { - puts("You need an ETRAX 100LX to run linux 2.6\n"); - while (1); - } - - puts("Uncompressing Linux...\n"); - gunzip(); - puts("Done. Now booting the kernel.\n"); -} diff --git a/arch/cris/arch-v10/kernel/entry.S b/arch/cris/arch-v10/kernel/entry.S index 72f5cd319b9..2c18d08cd91 100644 --- a/arch/cris/arch-v10/kernel/entry.S +++ b/arch/cris/arch-v10/kernel/entry.S @@ -536,10 +536,10 @@ multiple_interrupt: movem $r13, [$sp] push $r10 ; push orig_r10 clear.d [$sp=$sp-4] ; frametype == 0, normal frame - + move.d $sp, $r10 jsr do_multiple_IRQ - + jump ret_from_intr do_sigtrap: @@ -585,7 +585,7 @@ _ugdb_handle_breakpoint: pop $r0 ; Restore r0. ba do_sigtrap ; SIGTRAP the offending process. pop $dccr ; Restore dccr in delay slot. - + .global kernel_execve kernel_execve: move.d __NR_execve, $r9 @@ -929,6 +929,14 @@ sys_call_table: .long sys_fallocate .long sys_timerfd_settime /* 325 */ .long sys_timerfd_gettime + .long sys_signalfd4 + .long sys_eventfd2 + .long sys_epoll_create1 + .long sys_dup3 /* 330 */ + .long sys_pipe2 + .long sys_inotify_init1 + .long sys_preadv + .long sys_pwritev /* * NOTE!! 
This doesn't have to be exact - we just have diff --git a/arch/cris/arch-v32/boot/Makefile b/arch/cris/arch-v32/boot/Makefile deleted file mode 100644 index 99896ad60b3..00000000000 --- a/arch/cris/arch-v32/boot/Makefile +++ /dev/null @@ -1,20 +0,0 @@ -# -# arch/cris/arch-v32/boot/Makefile -# - -OBJCOPYFLAGS = -O binary -R .note -R .comment - -subdir- := compressed rescue -targets := Image - -$(obj)/Image: vmlinux FORCE - $(call if_changed,objcopy) - @echo ' Kernel: $@ is ready' - -$(obj)/compressed/vmlinux: $(obj)/Image FORCE - $(Q)$(MAKE) $(build)=$(obj)/compressed $@ - $(Q)$(MAKE) $(build)=$(obj)/rescue $(obj)/rescue/rescue.bin - -$(obj)/zImage: $(obj)/compressed/vmlinux - @cp $< $@ - @echo ' Kernel: $@ is ready' diff --git a/arch/cris/arch-v32/boot/compressed/Makefile b/arch/cris/arch-v32/boot/compressed/Makefile deleted file mode 100644 index e176b8b69d9..00000000000 --- a/arch/cris/arch-v32/boot/compressed/Makefile +++ /dev/null @@ -1,26 +0,0 @@ -# -# arch/cris/arch-v32/boot/compressed/Makefile -# - -asflags-y += -I$(srctree)/include/asm/mach/ -I$(srctree)/include/asm/arch -ccflags-y += -O2 -I$(srctree)/include/asm/mach/ -I$(srctree)/include/asm/arch -ldflags-y += -T$(srctree)/$(src)/decompress.lds -OBJECTS = $(obj)/head.o $(obj)/misc.o -OBJCOPYFLAGS = -O binary --remove-section=.bss - -quiet_cmd_image = BUILD $@ -cmd_image = cat $(obj)/decompress.bin $(obj)/piggy.gz > $@ - -targets := vmlinux piggy.gz decompress.o decompress.bin - -$(obj)/decompress.o: $(OBJECTS) FORCE - $(call if_changed,ld) - -$(obj)/decompress.bin: $(obj)/decompress.o FORCE - $(call if_changed,objcopy) - -$(obj)/vmlinux: $(obj)/piggy.gz $(obj)/decompress.bin FORCE - $(call if_changed,image) - -$(obj)/piggy.gz: $(obj)/../Image FORCE - $(call if_changed,gzip) diff --git a/arch/cris/arch-v32/boot/rescue/Makefile b/arch/cris/arch-v32/boot/rescue/Makefile deleted file mode 100644 index 566aac663a3..00000000000 --- a/arch/cris/arch-v32/boot/rescue/Makefile +++ /dev/null @@ -1,26 +0,0 @@ -# -# Makefile for rescue (bootstrap) code -# - -CC = gcc-cris -mlinux -march=v32 $(LINUXINCLUDE) -ccflags-y += -O2 -I $(srctree)/include/asm/arch/mach/ \ - -I $(srctree)/include/asm/arch -asflags-y += -I $(srctree)/include/asm/arch/mach/ -I $(srctree)/include/asm/arch -LD = gcc-cris -mlinux -march=v32 -nostdlib -ldflags-y += -T $(srctree)/$(src)/rescue.lds -LDPOSTFLAGS = -lgcc -OBJCOPYFLAGS = -O binary --remove-section=.bss -obj-$(CONFIG_ETRAX_AXISFLASHMAP) = head.o -OBJECT := $(obj)/head.o - -targets := rescue.o rescue.bin - -quiet_cmd_ldlibgcc = LD $@ -cmd_ldlibgcc = $(LD) $(LDFLAGS) $(filter-out FORCE,$^) $(LDPOSTFLAGS) -o $@ - -$(obj)/rescue.o: $(OBJECTS) FORCE - $(call if_changed,ldlibgcc) - -$(obj)/rescue.bin: $(obj)/rescue.o FORCE - $(call if_changed,objcopy) - cp -p $(obj)/rescue.bin $(objtree) diff --git a/arch/cris/arch-v32/drivers/mach-a3/gpio.c b/arch/cris/arch-v32/drivers/mach-a3/gpio.c index 7a87bc0ae2e..97357cfd17b 100644 --- a/arch/cris/arch-v32/drivers/mach-a3/gpio.c +++ b/arch/cris/arch-v32/drivers/mach-a3/gpio.c @@ -681,7 +681,7 @@ static int virtual_gpio_ioctl(struct file *file, unsigned int cmd, shadow |= ~readl(dir_oe[priv->minor]) | (arg & changeable_bits[priv->minor]); i2c_write(VIRT_I2C_ADDR, (void *)&shadow, sizeof(shadow)); - spin_lock_irqrestore(&gpio_lock, flags); + spin_unlock_irqrestore(&gpio_lock, flags); break; case IO_CLRBITS: spin_lock_irqsave(&gpio_lock, flags); @@ -690,7 +690,7 @@ static int virtual_gpio_ioctl(struct file *file, unsigned int cmd, shadow |= ~readl(dir_oe[priv->minor]) & ~(arg & 
changeable_bits[priv->minor]); i2c_write(VIRT_I2C_ADDR, (void *)&shadow, sizeof(shadow)); - spin_lock_irqrestore(&gpio_lock, flags); + spin_unlock_irqrestore(&gpio_lock, flags); break; case IO_HIGHALARM: /* Set alarm when bits with 1 in arg go high. */ diff --git a/arch/cris/arch-v32/kernel/Makefile b/arch/cris/arch-v32/kernel/Makefile index 993d987b007..40358355d0c 100644 --- a/arch/cris/arch-v32/kernel/Makefile +++ b/arch/cris/arch-v32/kernel/Makefile @@ -9,8 +9,6 @@ obj-y := entry.o traps.o irq.o debugport.o \ process.o ptrace.o setup.o signal.o traps.o time.o \ cache.o cacheflush.o -obj-$(CONFIG_ETRAXFS_SIM) += vcs_hook.o - obj-$(CONFIG_SMP) += smp.o obj-$(CONFIG_ETRAX_KGDB) += kgdb.o kgdb_asm.o obj-$(CONFIG_ETRAX_FAST_TIMER) += fasttimer.o diff --git a/arch/cris/arch-v32/kernel/entry.S b/arch/cris/arch-v32/kernel/entry.S index 5e674c8f7c5..435b9671bd4 100644 --- a/arch/cris/arch-v32/kernel/entry.S +++ b/arch/cris/arch-v32/kernel/entry.S @@ -852,6 +852,14 @@ sys_call_table: .long sys_fallocate .long sys_timerfd_settime /* 325 */ .long sys_timerfd_gettime + .long sys_signalfd4 + .long sys_eventfd2 + .long sys_epoll_create1 + .long sys_dup3 /* 330 */ + .long sys_pipe2 + .long sys_inotify_init1 + .long sys_preadv + .long sys_pwritev /* * NOTE!! This doesn't have to be exact - we just have diff --git a/arch/cris/arch-v10/boot/.gitignore b/arch/cris/boot/.gitignore similarity index 100% rename from arch/cris/arch-v10/boot/.gitignore rename to arch/cris/boot/.gitignore diff --git a/arch/cris/arch-v10/boot/Makefile b/arch/cris/boot/Makefile similarity index 65% rename from arch/cris/arch-v10/boot/Makefile rename to arch/cris/boot/Makefile index 21720301443..144f3afa011 100644 --- a/arch/cris/arch-v10/boot/Makefile +++ b/arch/cris/boot/Makefile @@ -1,8 +1,12 @@ # -# arch/cris/arch-v10/boot/Makefile +# arch/cris/boot/Makefile # -OBJCOPYFLAGS = -O binary --remove-section=.bss +objcopyflags-$(CONFIG_ETRAX_ARCH_V10) += -R .note -R .comment +objcopyflags-$(CONFIG_ETRAX_ARCH_V32) += --remove-section=.bss + +OBJCOPYFLAGS = -O binary $(objcopyflags-y) + subdir- := compressed rescue targets := Image diff --git a/arch/cris/arch-v10/boot/compressed/Makefile b/arch/cris/boot/compressed/Makefile similarity index 50% rename from arch/cris/arch-v10/boot/compressed/Makefile rename to arch/cris/boot/compressed/Makefile index 6fe0ffaf3be..8fe9338c177 100644 --- a/arch/cris/arch-v10/boot/compressed/Makefile +++ b/arch/cris/boot/compressed/Makefile @@ -1,11 +1,23 @@ # -# arch/cris/arch-v10/boot/compressed/Makefile +# arch/cris/boot/compressed/Makefile # asflags-y += $(LINUXINCLUDE) ccflags-y += -O2 $(LINUXINCLUDE) -ldflags-y += -T $(srctree)/$(src)/decompress.lds -OBJECTS = $(obj)/head.o $(obj)/misc.o + +# asflags-$(CONFIG_ETRAX_ARCH_V32) += -I$(srctree)/include/asm/mach \ +# -I$(srctree)/include/asm/arch +# ccflags-$(CONFIG_ETRAX_ARCH_V32) += -O2 -I$(srctree)/include/asm/mach +# -I$(srctree)/include/asm/arch + +arch-$(CONFIG_ETRAX_ARCH_V10) = v10 +arch-$(CONFIG_ETRAX_ARCH_V32) = v32 + +ldflags-y += -T $(srctree)/$(src)/decompress_$(arch-y).lds + +OBJECTS-$(CONFIG_ETRAX_ARCH_V32) = $(obj)/head_v32.o +OBJECTS-$(CONFIG_ETRAX_ARCH_V10) = $(obj)/head_v10.o +OBJECTS= $(OBJECTS-y) $(obj)/misc.o OBJCOPYFLAGS = -O binary --remove-section=.bss quiet_cmd_image = BUILD $@ @@ -24,4 +36,3 @@ $(obj)/vmlinux: $(obj)/piggy.gz $(obj)/decompress.bin FORCE $(obj)/piggy.gz: $(obj)/../Image FORCE $(call if_changed,gzip) - diff --git a/arch/cris/arch-v32/boot/compressed/README b/arch/cris/boot/compressed/README similarity index 100% 
rename from arch/cris/arch-v32/boot/compressed/README rename to arch/cris/boot/compressed/README diff --git a/arch/cris/arch-v10/boot/compressed/decompress.lds b/arch/cris/boot/compressed/decompress_v10.lds similarity index 100% rename from arch/cris/arch-v10/boot/compressed/decompress.lds rename to arch/cris/boot/compressed/decompress_v10.lds diff --git a/arch/cris/arch-v32/boot/compressed/decompress.lds b/arch/cris/boot/compressed/decompress_v32.lds similarity index 100% rename from arch/cris/arch-v32/boot/compressed/decompress.lds rename to arch/cris/boot/compressed/decompress_v32.lds diff --git a/arch/cris/arch-v10/boot/compressed/head.S b/arch/cris/boot/compressed/head_v10.S similarity index 97% rename from arch/cris/arch-v10/boot/compressed/head.S rename to arch/cris/boot/compressed/head_v10.S index 0bb4dcc2925..9edb8ade7e1 100644 --- a/arch/cris/arch-v10/boot/compressed/head.S +++ b/arch/cris/boot/compressed/head_v10.S @@ -30,7 +30,7 @@ beq dram_init_finished nop -#include "../../lib/dram_init.S" +#include "../../arch-v10/lib/dram_init.S" dram_init_finished: @@ -123,4 +123,4 @@ _cmd_line_magic: .dword 0 _cmd_line_addr: .dword 0 -#include "../../lib/hw_settings.S" +#include "../../arch-v10/lib/hw_settings.S" diff --git a/arch/cris/arch-v32/boot/compressed/head.S b/arch/cris/boot/compressed/head_v32.S similarity index 94% rename from arch/cris/arch-v32/boot/compressed/head.S rename to arch/cris/boot/compressed/head_v32.S index a4a65c5c669..f483005f3d4 100644 --- a/arch/cris/arch-v32/boot/compressed/head.S +++ b/arch/cris/boot/compressed/head_v32.S @@ -17,7 +17,7 @@ .globl input_data .text -_start: +start: di ;; Start clocks for used blocks. @@ -29,9 +29,9 @@ _start: nop #if defined CONFIG_ETRAXFS -#include "../../mach-fs/dram_init.S" +#include "../../arch-v32/mach-fs/dram_init.S" #elif defined CONFIG_CRIS_MACH_ARTPEC3 -#include "../../mach-a3/dram_init.S" +#include "../../arch-v32/mach-a3/dram_init.S" #else #error Only ETRAXFS and ARTPEC-3 supported! #endif @@ -137,9 +137,9 @@ _boot_source: .dword 0 #if defined CONFIG_ETRAXFS -#include "../../mach-fs/hw_settings.S" +#include "../../arch-v32/mach-fs/hw_settings.S" #elif defined CONFIG_CRIS_MACH_ARTPEC3 -#include "../../mach-a3/hw_settings.S" +#include "../../arch-v32/mach-a3/hw_settings.S" #else #error Only ETRAXFS and ARTPEC-3 supported! 
#endif diff --git a/arch/cris/arch-v32/boot/compressed/misc.c b/arch/cris/boot/compressed/misc.c similarity index 65% rename from arch/cris/arch-v32/boot/compressed/misc.c rename to arch/cris/boot/compressed/misc.c index 3595e16e82b..47bc190ba6d 100644 --- a/arch/cris/arch-v32/boot/compressed/misc.c +++ b/arch/cris/boot/compressed/misc.c @@ -18,8 +18,9 @@ #define KERNEL_LOAD_ADR 0x40004000 - #include + +#ifdef CONFIG_ETRAX_ARCH_V32 #include #include #include @@ -27,6 +28,9 @@ #ifdef CONFIG_CRIS_MACH_ARTPEC3 #include #endif +#else +#include +#endif /* * gzip declarations @@ -35,12 +39,10 @@ #define OF(args) args #define STATIC static -void* memset(void* s, int c, size_t n); -void* memcpy(void* __dest, __const void* __src, - size_t __n); - -#define memzero(s, n) memset ((s), 0, (n)) +void *memset(void *s, int c, size_t n); +void *memcpy(void *__dest, __const void *__src, size_t __n); +#define memzero(s, n) memset((s), 0, (n)) typedef unsigned char uch; typedef unsigned short ush; @@ -68,27 +70,43 @@ static unsigned outcnt = 0; /* bytes in output buffer */ #define ENCRYPTED 0x20 /* bit 5 set: file is encrypted */ #define RESERVED 0xC0 /* bit 6,7: reserved */ -#define get_byte() inbuf[inptr++] +#define get_byte() (inbuf[inptr++]) /* Diagnostic functions */ #ifdef DEBUG -# define Assert(cond,msg) {if(!(cond)) error(msg);} +# define Assert(cond, msg) do { \ + if (!(cond)) \ + error(msg); \ + } while (0) # define Trace(x) fprintf x -# define Tracev(x) {if (verbose) fprintf x ;} -# define Tracevv(x) {if (verbose>1) fprintf x ;} -# define Tracec(c,x) {if (verbose && (c)) fprintf x ;} -# define Tracecv(c,x) {if (verbose>1 && (c)) fprintf x ;} +# define Tracev(x) do { \ + if (verbose) \ + fprintf x; \ + } while (0) +# define Tracevv(x) do { \ + if (verbose > 1) \ + fprintf x; \ + } while (0) +# define Tracec(c, x) do { \ + if (verbose && (c)) \ + fprintf x; \ + } while (0) +# define Tracecv(c, x) do { \ + if (verbose > 1 && (c)) \ + fprintf x; \ + } while (0) #else -# define Assert(cond,msg) +# define Assert(cond, msg) # define Trace(x) # define Tracev(x) # define Tracevv(x) -# define Tracec(c,x) -# define Tracecv(c,x) +# define Tracec(c, x) +# define Tracecv(c, x) #endif static void flush_window(void); static void error(char *m); +static void puts(const char *); extern char *input_data; /* lives in head.S */ @@ -96,10 +114,6 @@ static long bytes_out; static uch *output_data; static unsigned long output_ptr; -static void error(char *m); - -static void puts(const char *); - /* the "heap" is put directly after the BSS ends, at end */ extern int _end; @@ -110,8 +124,8 @@ static long free_mem_end_ptr; /* decompressor info and error messages to serial console */ -static inline void -serout(const char *s, reg_scope_instances regi_ser) +#ifdef CONFIG_ETRAX_ARCH_V32 +static inline void serout(const char *s, reg_scope_instances regi_ser) { reg_ser_rs_stat_din rs; reg_ser_rw_dout dout = {.data = *s}; @@ -123,23 +137,47 @@ serout(const char *s, reg_scope_instances regi_ser) REG_WR(ser, regi_ser, rw_dout, dout); } +#endif -static void -puts(const char *s) +static void puts(const char *s) { #ifndef CONFIG_ETRAX_DEBUG_PORT_NULL while (*s) { #ifdef CONFIG_ETRAX_DEBUG_PORT0 +#ifdef CONFIG_ETRAX_ARCH_V32 serout(s, regi_ser0); +#else + while (!(*R_SERIAL0_STATUS & (1 << 5))) + ; + *R_SERIAL0_TR_DATA = *s++; +#endif #endif #ifdef CONFIG_ETRAX_DEBUG_PORT1 +#ifdef CONFIG_ETRAX_ARCH_V32 serout(s, regi_ser1); +#else + while (!(*R_SERIAL1_STATUS & (1 << 5))) + ; + *R_SERIAL1_TR_DATA = *s++; +#endif #endif #ifdef 
CONFIG_ETRAX_DEBUG_PORT2 +#ifdef CONFIG_ETRAX_ARCH_V32 serout(s, regi_ser2); +#else + while (!(*R_SERIAL2_STATUS & (1 << 5))) + ; + *R_SERIAL2_TR_DATA = *s++; +#endif #endif #ifdef CONFIG_ETRAX_DEBUG_PORT3 +#ifdef CONFIG_ETRAX_ARCH_V32 serout(s, regi_ser3); +#else + while (!(*R_SERIAL3_STATUS & (1 << 5))) + ; + *R_SERIAL3_TR_DATA = *s++; +#endif #endif *s++; } @@ -147,8 +185,7 @@ puts(const char *s) #endif } -void* -memset(void* s, int c, size_t n) +void *memset(void *s, int c, size_t n) { int i; char *ss = (char*)s; @@ -158,14 +195,13 @@ memset(void* s, int c, size_t n) return s; } -void* -memcpy(void* __dest, __const void* __src, - size_t __n) +void *memcpy(void *__dest, __const void *__src, size_t __n) { int i; char *d = (char *)__dest, *s = (char *)__src; - for (i=0;i<__n;i++) d[i] = s[i]; + for (i = 0; i < __n; i++) + d[i] = s[i]; return __dest; } @@ -175,43 +211,42 @@ memcpy(void* __dest, __const void* __src, * (Used for the decompressed data only.) */ -static void -flush_window() +static void flush_window(void) { - ulg c = crc; /* temporary variable */ - unsigned n; - uch *in, *out, ch; + ulg c = crc; /* temporary variable */ + unsigned n; + uch *in, *out, ch; - in = window; - out = &output_data[output_ptr]; - for (n = 0; n < outcnt; n++) { - ch = *out++ = *in++; - c = crc_32_tab[((int)c ^ ch) & 0xff] ^ (c >> 8); - } - crc = c; - bytes_out += (ulg)outcnt; - output_ptr += (ulg)outcnt; - outcnt = 0; + in = window; + out = &output_data[output_ptr]; + for (n = 0; n < outcnt; n++) { + ch = *out = *in; + out++; + in++; + c = crc_32_tab[((int)c ^ ch) & 0xff] ^ (c >> 8); + } + crc = c; + bytes_out += (ulg)outcnt; + output_ptr += (ulg)outcnt; + outcnt = 0; } -static void -error(char *x) +static void error(char *x) { - puts("\r\n\n"); + puts("\n\n"); puts(x); - puts("\r\n\n -- System halted\n"); + puts("\n\n -- System halted\n"); while(1); /* Halt */ } -void -setup_normal_output_buffer(void) +void setup_normal_output_buffer(void) { output_data = (char *)KERNEL_LOAD_ADR; } -static inline void -serial_setup(reg_scope_instances regi_ser) +#ifdef CONFIG_ETRAX_ARCH_V32 +static inline void serial_setup(reg_scope_instances regi_ser) { reg_ser_rw_xoff xoff; reg_ser_rw_tr_ctrl tr_ctrl; @@ -252,12 +287,16 @@ serial_setup(reg_scope_instances regi_ser) REG_WR(ser, regi_ser, rw_rec_ctrl, rec_ctrl); REG_WR(ser, regi_ser, rw_rec_baud_div, rec_baud); } +#endif -void -decompress_kernel(void) +void decompress_kernel(void) { char revision; + char compile_rev; +#ifdef CONFIG_ETRAX_ARCH_V32 + /* Need at least a CRISv32 to run. */ + compile_rev = 32; #if defined(CONFIG_ETRAX_DEBUG_PORT1) || \ defined(CONFIG_ETRAX_DEBUG_PORT2) || \ defined(CONFIG_ETRAX_DEBUG_PORT3) @@ -277,6 +316,7 @@ decompress_kernel(void) hwprot = REG_RD(pinmux, regi_pinmux, rw_hwprot); #endif + #ifdef CONFIG_ETRAX_DEBUG_PORT0 serial_setup(regi_ser0); #endif @@ -300,19 +340,52 @@ decompress_kernel(void) /* input_data is set in head.S */ inbuf = input_data; +#else /* CRISv10 */ + /* Need at least a crisv10 to run. 
*/ + compile_rev = 10; + + /* input_data is set in head.S */ + inbuf = input_data; + +#ifdef CONFIG_ETRAX_DEBUG_PORT0 + *R_SERIAL0_XOFF = 0; + *R_SERIAL0_BAUD = 0x99; + *R_SERIAL0_TR_CTRL = 0x40; +#endif +#ifdef CONFIG_ETRAX_DEBUG_PORT1 + *R_SERIAL1_XOFF = 0; + *R_SERIAL1_BAUD = 0x99; + *R_SERIAL1_TR_CTRL = 0x40; +#endif +#ifdef CONFIG_ETRAX_DEBUG_PORT2 + *R_GEN_CONFIG = 0x08; + *R_SERIAL2_XOFF = 0; + *R_SERIAL2_BAUD = 0x99; + *R_SERIAL2_TR_CTRL = 0x40; +#endif +#ifdef CONFIG_ETRAX_DEBUG_PORT3 + *R_GEN_CONFIG = 0x100; + *R_SERIAL3_XOFF = 0; + *R_SERIAL3_BAUD = 0x99; + *R_SERIAL3_TR_CTRL = 0x40; +#endif +#endif setup_normal_output_buffer(); makecrc(); __asm__ volatile ("move $vr,%0" : "=rm" (revision)); - if (revision < 32) - { - puts("You need an ETRAX FS to run Linux 2.6/crisv32.\r\n"); + if (revision < compile_rev) { +#ifdef CONFIG_ETRAX_ARCH_V32 + puts("You need an ETRAX FS to run Linux 2.6/crisv32\n"); +#else + puts("You need an ETRAX 100LX to run linux 2.6\n"); +#endif while(1); } - puts("Uncompressing Linux...\r\n"); + puts("Uncompressing Linux...\n"); gunzip(); - puts("Done. Now booting the kernel.\r\n"); + puts("Done. Now booting the kernel\n"); } diff --git a/arch/cris/arch-v10/boot/rescue/Makefile b/arch/cris/boot/rescue/Makefile similarity index 63% rename from arch/cris/arch-v10/boot/rescue/Makefile rename to arch/cris/boot/rescue/Makefile index 82ab59b968e..52bd0bd1dd2 100644 --- a/arch/cris/arch-v10/boot/rescue/Makefile +++ b/arch/cris/boot/rescue/Makefile @@ -2,16 +2,26 @@ # Makefile for rescue (bootstrap) code # -ccflags-y += -O2 $(LINUXINCLUDE) +# CC = gcc-cris -mlinux -march=v32 $(LINUXINCLUDE) +# ccflags-$(CONFIG_ETRAX_ARCH_V32) += -I$(srctree)/include/asm/arch/mach/ \ +# -I$(srctree)/include/asm/arch +# asflags-y += -I $(srctree)/include/asm/arch/mach/ -I $(srctree)/include/asm/arch +# LD = gcc-cris -mlinux -march=v32 -nostdlib + asflags-y += $(LINUXINCLUDE) -ldflags-y += -T $(srctree)/$(src)/rescue.lds +ccflags-y += -O2 $(LINUXINCLUDE) +arch-$(CONFIG_ETRAX_ARCH_V10) = v10 +arch-$(CONFIG_ETRAX_ARCH_V32) = v32 + +ldflags-y += -T $(srctree)/$(src)/rescue_$(arch-y).lds OBJCOPYFLAGS = -O binary --remove-section=.bss -obj-$(CONFIG_ETRAX_AXISFLASHMAP) = head.o -OBJECT := $(obj)/head.o +obj-$(CONFIG_ETRAX_ARCH_V32) = $(obj)/head_v32.o +obj-$(CONFIG_ETRAX_ARCH_V10) = $(obj)/head_v10.o +OBJECTS := $(obj-y) targets := rescue.o rescue.bin -$(obj)/rescue.o: $(OBJECT) FORCE +$(obj)/rescue.o: $(OBJECTS) FORCE $(call if_changed,ld) $(obj)/rescue.bin: $(obj)/rescue.o FORCE @@ -26,6 +36,7 @@ $(obj)/testrescue.bin: $(obj)/testrescue.o dd if=testrescue_tmp.bin of=$(obj)/testrescue.bin bs=1 count=784 rm tr.bin tmp2423 testrescue_tmp.bin + $(obj)/kimagerescue.bin: $(obj)/kimagerescue.o $(OBJCOPY) $(OBJCOPYFLAGS) $(obj)/kimagerescue.o ktr.bin # Pad it to 784 bytes, that's what the rescue loader expects @@ -33,3 +44,4 @@ $(obj)/kimagerescue.bin: $(obj)/kimagerescue.o cat ktr.bin tmp2423 >kimagerescue_tmp.bin dd if=kimagerescue_tmp.bin of=$(obj)/kimagerescue.bin bs=1 count=784 rm ktr.bin tmp2423 kimagerescue_tmp.bin + diff --git a/arch/cris/arch-v10/boot/rescue/head.S b/arch/cris/boot/rescue/head_v10.S similarity index 99% rename from arch/cris/arch-v10/boot/rescue/head.S rename to arch/cris/boot/rescue/head_v10.S index fb503d1eeea..2fafe247a25 100644 --- a/arch/cris/arch-v10/boot/rescue/head.S +++ b/arch/cris/boot/rescue/head_v10.S @@ -155,7 +155,7 @@ no_newjump: #endif ;; We need to setup the bus registers before we start using the DRAM -#include "../../lib/dram_init.S" +#include 
"../../../arch-v10/lib/dram_init.S" ;; we now should go through the checksum-table and check the listed ;; partitions for errors. diff --git a/arch/cris/arch-v32/boot/rescue/head.S b/arch/cris/boot/rescue/head_v32.S similarity index 100% rename from arch/cris/arch-v32/boot/rescue/head.S rename to arch/cris/boot/rescue/head_v32.S diff --git a/arch/cris/arch-v10/boot/rescue/kimagerescue.S b/arch/cris/boot/rescue/kimagerescue.S similarity index 100% rename from arch/cris/arch-v10/boot/rescue/kimagerescue.S rename to arch/cris/boot/rescue/kimagerescue.S diff --git a/arch/cris/arch-v10/boot/rescue/rescue.lds b/arch/cris/boot/rescue/rescue_v10.lds similarity index 100% rename from arch/cris/arch-v10/boot/rescue/rescue.lds rename to arch/cris/boot/rescue/rescue_v10.lds diff --git a/arch/cris/arch-v32/boot/rescue/rescue.lds b/arch/cris/boot/rescue/rescue_v32.lds similarity index 100% rename from arch/cris/arch-v32/boot/rescue/rescue.lds rename to arch/cris/boot/rescue/rescue_v32.lds diff --git a/arch/cris/arch-v10/boot/rescue/testrescue.S b/arch/cris/boot/rescue/testrescue.S similarity index 100% rename from arch/cris/arch-v10/boot/rescue/testrescue.S rename to arch/cris/boot/rescue/testrescue.S diff --git a/arch/cris/arch-v10/boot/tools/build.c b/arch/cris/boot/tools/build.c similarity index 100% rename from arch/cris/arch-v10/boot/tools/build.c rename to arch/cris/boot/tools/build.c diff --git a/arch/cris/include/asm/unistd.h b/arch/cris/include/asm/unistd.h index 235d076379d..c17079388bb 100644 --- a/arch/cris/include/asm/unistd.h +++ b/arch/cris/include/asm/unistd.h @@ -281,7 +281,7 @@ #define __NR_mbind 274 #define __NR_get_mempolicy 275 #define __NR_set_mempolicy 276 -#define __NR_mq_open 277 +#define __NR_mq_open 277 #define __NR_mq_unlink (__NR_mq_open+1) #define __NR_mq_timedsend (__NR_mq_open+2) #define __NR_mq_timedreceive (__NR_mq_open+3) @@ -331,10 +331,18 @@ #define __NR_fallocate 324 #define __NR_timerfd_settime 325 #define __NR_timerfd_gettime 326 +#define __NR_signalfd4 327 +#define __NR_eventfd2 328 +#define __NR_epoll_create1 329 +#define __NR_dup3 330 +#define __NR_pipe2 331 +#define __NR_inotify_init1 332 +#define __NR_preadv 333 +#define __NR_pwritev 334 #ifdef __KERNEL__ -#define NR_syscalls 327 +#define NR_syscalls 335 #include diff --git a/arch/h8300/include/asm/flat.h b/arch/h8300/include/asm/flat.h index 2a873508a9a..bd12b31b90e 100644 --- a/arch/h8300/include/asm/flat.h +++ b/arch/h8300/include/asm/flat.h @@ -5,7 +5,6 @@ #ifndef __H8300_FLAT_H__ #define __H8300_FLAT_H__ -#define flat_stack_align(sp) /* nothing needed */ #define flat_argvp_envp_on_stack() 1 #define flat_old_ram_flag(flags) 1 #define flat_reloc_valid(reloc, size) ((reloc) <= (size)) diff --git a/arch/m32r/include/asm/flat.h b/arch/m32r/include/asm/flat.h index d851cf0c4aa..5d711c4688f 100644 --- a/arch/m32r/include/asm/flat.h +++ b/arch/m32r/include/asm/flat.h @@ -12,7 +12,6 @@ #ifndef __ASM_M32R_FLAT_H #define __ASM_M32R_FLAT_H -#define flat_stack_align(sp) (*sp += (*sp & 3 ? 
(4 - (*sp & 3)): 0)) #define flat_argvp_envp_on_stack() 0 #define flat_old_ram_flag(flags) (flags) #define flat_set_persistent(relval, p) 0 diff --git a/arch/m68k/include/asm/flat.h b/arch/m68k/include/asm/flat.h index 814b5174a8e..a0e29079397 100644 --- a/arch/m68k/include/asm/flat.h +++ b/arch/m68k/include/asm/flat.h @@ -5,7 +5,6 @@ #ifndef __M68KNOMMU_FLAT_H__ #define __M68KNOMMU_FLAT_H__ -#define flat_stack_align(sp) /* nothing needed */ #define flat_argvp_envp_on_stack() 1 #define flat_old_ram_flag(flags) (flags) #define flat_reloc_valid(reloc, size) ((reloc) <= (size)) diff --git a/arch/microblaze/configs/nommu_defconfig b/arch/microblaze/configs/nommu_defconfig index beb7ecd7279..4ef6af0a8f3 100644 --- a/arch/microblaze/configs/nommu_defconfig +++ b/arch/microblaze/configs/nommu_defconfig @@ -1,7 +1,7 @@ # # Automatically generated make config: don't edit -# Linux kernel version: 2.6.29 -# Tue Mar 24 10:23:20 2009 +# Linux kernel version: 2.6.30-rc5 +# Mon May 11 09:01:02 2009 # CONFIG_MICROBLAZE=y # CONFIG_SWAP is not set @@ -32,6 +32,7 @@ CONFIG_LOCALVERSION_AUTO=y CONFIG_SYSVIPC=y CONFIG_SYSVIPC_SYSCTL=y CONFIG_POSIX_MQUEUE=y +CONFIG_POSIX_MQUEUE_SYSCTL=y CONFIG_BSD_PROCESS_ACCT=y CONFIG_BSD_PROCESS_ACCT_V3=y # CONFIG_TASKSTATS is not set @@ -63,6 +64,7 @@ CONFIG_SYSCTL_SYSCALL=y CONFIG_KALLSYMS=y CONFIG_KALLSYMS_ALL=y CONFIG_KALLSYMS_EXTRA_PASS=y +# CONFIG_STRIP_ASM_SYMS is not set # CONFIG_HOTPLUG is not set CONFIG_PRINTK=y CONFIG_BUG=y @@ -80,6 +82,8 @@ CONFIG_SLAB=y # CONFIG_SLUB is not set # CONFIG_SLOB is not set # CONFIG_PROFILING is not set +# CONFIG_MARKERS is not set +# CONFIG_SLOW_WORK is not set # CONFIG_HAVE_GENERIC_DMA_COHERENT is not set CONFIG_SLABINFO=y CONFIG_RT_MUTEXES=y @@ -92,7 +96,6 @@ CONFIG_MODULE_UNLOAD=y # CONFIG_MODULE_SRCVERSION_ALL is not set CONFIG_BLOCK=y # CONFIG_LBD is not set -# CONFIG_BLK_DEV_IO_TRACE is not set # CONFIG_BLK_DEV_BSG is not set # CONFIG_BLK_DEV_INTEGRITY is not set @@ -166,6 +169,8 @@ CONFIG_SPLIT_PTLOCK_CPUS=4 # CONFIG_PHYS_ADDR_T_64BIT is not set CONFIG_ZONE_DMA_FLAG=0 CONFIG_VIRT_TO_BUS=y +CONFIG_UNEVICTABLE_LRU=y +CONFIG_NOMMU_INITIAL_TRIM_EXCESS=1 # # Exectuable file formats @@ -180,7 +185,6 @@ CONFIG_NET=y # # Networking options # -CONFIG_COMPAT_NET_DEV_OPS=y CONFIG_PACKET=y # CONFIG_PACKET_MMAP is not set CONFIG_UNIX=y @@ -232,6 +236,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic" # CONFIG_LAPB is not set # CONFIG_ECONET is not set # CONFIG_WAN_ROUTER is not set +# CONFIG_PHONET is not set # CONFIG_NET_SCHED is not set # CONFIG_DCB is not set @@ -244,7 +249,6 @@ CONFIG_DEFAULT_TCP_CONG="cubic" # CONFIG_IRDA is not set # CONFIG_BT is not set # CONFIG_AF_RXRPC is not set -# CONFIG_PHONET is not set CONFIG_WIRELESS=y # CONFIG_CFG80211 is not set CONFIG_WIRELESS_OLD_REGULATORY=y @@ -379,6 +383,7 @@ CONFIG_MISC_DEVICES=y # CONFIG_ATA is not set # CONFIG_MD is not set CONFIG_NETDEVICES=y +CONFIG_COMPAT_NET_DEV_OPS=y # CONFIG_DUMMY is not set # CONFIG_BONDING is not set # CONFIG_MACVLAN is not set @@ -388,6 +393,7 @@ CONFIG_NETDEVICES=y # CONFIG_PHYLIB is not set CONFIG_NET_ETHERNET=y # CONFIG_MII is not set +# CONFIG_ETHOC is not set # CONFIG_DNET is not set # CONFIG_IBM_NEW_EMAC_ZMII is not set # CONFIG_IBM_NEW_EMAC_RGMII is not set @@ -405,7 +411,6 @@ CONFIG_NETDEV_10000=y # # CONFIG_WLAN_PRE80211 is not set # CONFIG_WLAN_80211 is not set -# CONFIG_IWLWIFI_LEDS is not set # # Enable WiMAX (Networking options) to see the WiMAX drivers @@ -455,6 +460,7 @@ CONFIG_LEGACY_PTYS=y CONFIG_LEGACY_PTY_COUNT=256 # CONFIG_IPMI_HANDLER is not set 
CONFIG_HW_RANDOM=y +# CONFIG_HW_RANDOM_TIMERIOMEM is not set # CONFIG_RTC is not set # CONFIG_GEN_RTC is not set # CONFIG_R3964 is not set @@ -525,7 +531,7 @@ CONFIG_USB_SUPPORT=y # # -# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may also be needed; +# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may # # CONFIG_USB_GADGET is not set @@ -538,6 +544,7 @@ CONFIG_USB_SUPPORT=y # CONFIG_ACCESSIBILITY is not set # CONFIG_RTC_CLASS is not set # CONFIG_DMADEVICES is not set +# CONFIG_AUXDISPLAY is not set # CONFIG_UIO is not set # CONFIG_STAGING is not set @@ -562,6 +569,11 @@ CONFIG_FILE_LOCKING=y # CONFIG_AUTOFS4_FS is not set # CONFIG_FUSE_FS is not set +# +# Caches +# +# CONFIG_FSCACHE is not set + # # CD-ROM/DVD Filesystems # @@ -601,8 +613,13 @@ CONFIG_CRAMFS=y # CONFIG_HPFS_FS is not set # CONFIG_QNX4FS_FS is not set CONFIG_ROMFS_FS=y +CONFIG_ROMFS_BACKED_BY_BLOCK=y +# CONFIG_ROMFS_BACKED_BY_MTD is not set +# CONFIG_ROMFS_BACKED_BY_BOTH is not set +CONFIG_ROMFS_ON_BLOCK=y # CONFIG_SYSV_FS is not set # CONFIG_UFS_FS is not set +# CONFIG_NILFS2_FS is not set CONFIG_NETWORK_FILESYSTEMS=y CONFIG_NFS_FS=y CONFIG_NFS_V3=y @@ -614,7 +631,6 @@ CONFIG_LOCKD_V4=y CONFIG_NFS_ACL_SUPPORT=y CONFIG_NFS_COMMON=y CONFIG_SUNRPC=y -# CONFIG_SUNRPC_REGISTER_V4 is not set # CONFIG_RPCSEC_GSS_KRB5 is not set # CONFIG_RPCSEC_GSS_SPKM3 is not set # CONFIG_SMB_FS is not set @@ -647,6 +663,9 @@ CONFIG_DEBUG_SHIRQ=y CONFIG_DETECT_SOFTLOCKUP=y CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=1 +CONFIG_DETECT_HUNG_TASK=y +# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set +CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0 CONFIG_SCHED_DEBUG=y CONFIG_SCHEDSTATS=y CONFIG_TIMER_STATS=y @@ -678,15 +697,8 @@ CONFIG_DEBUG_SG=y # CONFIG_DEBUG_BLOCK_EXT_DEVT is not set # CONFIG_FAULT_INJECTION is not set CONFIG_SYSCTL_SYSCALL_CHECK=y - -# -# Tracers -# -# CONFIG_SCHED_TRACER is not set -# CONFIG_CONTEXT_SWITCH_TRACER is not set -# CONFIG_BOOT_TRACER is not set -# CONFIG_TRACE_BRANCH_PROFILING is not set -# CONFIG_DYNAMIC_PRINTK_DEBUG is not set +# CONFIG_PAGE_POISONING is not set +# CONFIG_DYNAMIC_DEBUG is not set # CONFIG_SAMPLES is not set CONFIG_EARLY_PRINTK=y CONFIG_HEART_BEAT=y @@ -777,6 +789,7 @@ CONFIG_CRYPTO=y # Compression # # CONFIG_CRYPTO_DEFLATE is not set +# CONFIG_CRYPTO_ZLIB is not set # CONFIG_CRYPTO_LZO is not set # @@ -784,6 +797,7 @@ CONFIG_CRYPTO=y # # CONFIG_CRYPTO_ANSI_CPRNG is not set CONFIG_CRYPTO_HW=y +# CONFIG_BINARY_PRINTF is not set # # Library routines @@ -797,8 +811,8 @@ CONFIG_GENERIC_FIND_LAST_BIT=y # CONFIG_CRC7 is not set # CONFIG_LIBCRC32C is not set CONFIG_ZLIB_INFLATE=y -CONFIG_PLIST=y CONFIG_HAS_IOMEM=y CONFIG_HAS_IOPORT=y CONFIG_HAS_DMA=y CONFIG_HAVE_LMB=y +CONFIG_NLATTR=y diff --git a/arch/microblaze/kernel/intc.c b/arch/microblaze/kernel/intc.c index a69d3e3c2fd..b15605299a5 100644 --- a/arch/microblaze/kernel/intc.c +++ b/arch/microblaze/kernel/intc.c @@ -137,8 +137,8 @@ void __init init_IRQ(void) intr_type = *(int *) of_get_property(intc, "xlnx,kind-of-intr", NULL); - if (intr_type >= (1 << nr_irq)) - printk(KERN_INFO " ERROR: Mishmash in king-of-intr param\n"); + if (intr_type >= (1 << (nr_irq + 1))) + printk(KERN_INFO " ERROR: Mismatch in kind-of-intr param\n"); #ifdef CONFIG_SELFMOD_INTC selfmod_function((int *) arr_func, intc_baseaddr); diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 998e5db8cc0..25f3b0a11ca 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -72,6 +72,7 @@ config MIPS_COBALT select IRQ_CPU select IRQ_GT641XX select 
PCI_GT64XXX_PCI0 + select PCI select SYS_HAS_CPU_NEVADA select SYS_HAS_EARLY_PRINTK select SYS_SUPPORTS_32BIT_KERNEL @@ -593,7 +594,7 @@ config WR_PPMC board, which is based on GT64120 bridge chip. config CAVIUM_OCTEON_SIMULATOR - bool "Support for the Cavium Networks Octeon Simulator" + bool "Cavium Networks Octeon Simulator" select CEVT_R4K select 64BIT_PHYS_ADDR select DMA_COHERENT @@ -607,7 +608,7 @@ config CAVIUM_OCTEON_SIMULATOR hardware. config CAVIUM_OCTEON_REFERENCE_BOARD - bool "Support for the Cavium Networks Octeon reference board" + bool "Cavium Networks Octeon reference board" select CEVT_R4K select 64BIT_PHYS_ADDR select DMA_COHERENT @@ -1411,13 +1412,12 @@ config PAGE_SIZE_4KB config PAGE_SIZE_8KB bool "8kB" - depends on EXPERIMENTAL && CPU_R8000 + depends on (EXPERIMENTAL && CPU_R8000) || CPU_CAVIUM_OCTEON help Using 8kB page size will result in higher performance kernel at the price of higher memory consumption. This option is available - only on the R8000 processor. Not that at the time of this writing - this option is still high experimental; there are also issues with - compatibility of user applications. + only on R8000 and cnMIPS processors. Note that you will need a + suitable Linux distribution to support this. config PAGE_SIZE_16KB bool "16kB" @@ -1428,6 +1428,15 @@ config PAGE_SIZE_16KB all non-R3000 family processors. Note that you will need a suitable Linux distribution to support this. +config PAGE_SIZE_32KB + bool "32kB" + depends on CPU_CAVIUM_OCTEON + help + Using 32kB page size will result in higher performance kernel at + the price of higher memory consumption. This option is available + only on cnMIPS cores. Note that you will need a suitable Linux + distribution to support this. + config PAGE_SIZE_64KB bool "64kB" depends on EXPERIMENTAL && !CPU_R3000 && !CPU_TX39XX @@ -1958,10 +1967,6 @@ config SECCOMP endmenu -config RWSEM_GENERIC_SPINLOCK - bool - default y - config LOCKDEP_SUPPORT bool default y diff --git a/arch/mips/Makefile b/arch/mips/Makefile index 8d544c7c9fe..c4cae9e6b80 100644 --- a/arch/mips/Makefile +++ b/arch/mips/Makefile @@ -14,8 +14,6 @@ KBUILD_DEFCONFIG := ip22_defconfig -cflags-y := -ffunction-sections - # # Select the object file format to substitute into the linker script. 
# @@ -50,6 +48,9 @@ ifneq ($(SUBARCH),$(ARCH)) endif endif +cflags-y := -ffunction-sections +cflags-y += $(call cc-option, -mno-check-zero-division) + ifdef CONFIG_32BIT ld-emul = $(32bit-emul) vmlinux-32 = vmlinux @@ -472,12 +473,12 @@ endif # Simplified: what IP22 does at 128MB+ in ksegN, IP28 does at 512MB+ in xkphys # ifdef CONFIG_SGI_IP28 - ifeq ($(call cc-option-yn,-mr10k-cache-barrier=1), n) - $(error gcc doesn't support needed option -mr10k-cache-barrier=1) + ifeq ($(call cc-option-yn,-mr10k-cache-barrier=store), n) + $(error gcc doesn't support needed option -mr10k-cache-barrier=store) endif endif core-$(CONFIG_SGI_IP28) += arch/mips/sgi-ip22/ -cflags-$(CONFIG_SGI_IP28) += -mr10k-cache-barrier=1 -I$(srctree)/arch/mips/include/asm/mach-ip28 +cflags-$(CONFIG_SGI_IP28) += -mr10k-cache-barrier=store -I$(srctree)/arch/mips/include/asm/mach-ip28 load-$(CONFIG_SGI_IP28) += 0xa800000020004000 # diff --git a/arch/mips/alchemy/common/time.c b/arch/mips/alchemy/common/time.c index f58d4ffb894..33fbae79af5 100644 --- a/arch/mips/alchemy/common/time.c +++ b/arch/mips/alchemy/common/time.c @@ -44,7 +44,7 @@ extern int allow_au1k_wait; /* default off for CP0 Counter */ -static cycle_t au1x_counter1_read(void) +static cycle_t au1x_counter1_read(struct clocksource *cs) { return au_readl(SYS_RTCREAD); } diff --git a/arch/mips/cavium-octeon/csrc-octeon.c b/arch/mips/cavium-octeon/csrc-octeon.c index 70fd92c3165..96110f217dc 100644 --- a/arch/mips/cavium-octeon/csrc-octeon.c +++ b/arch/mips/cavium-octeon/csrc-octeon.c @@ -38,7 +38,7 @@ void octeon_init_cvmcount(void) local_irq_restore(flags); } -static cycle_t octeon_cvmcount_read(void) +static cycle_t octeon_cvmcount_read(struct clocksource *cs) { return read_c0_cvmcount(); } diff --git a/arch/mips/include/asm/bitops.h b/arch/mips/include/asm/bitops.h index bac4a960b24..b1e9e97a9c7 100644 --- a/arch/mips/include/asm/bitops.h +++ b/arch/mips/include/asm/bitops.h @@ -567,7 +567,7 @@ static inline unsigned long __fls(unsigned long word) int num; if (BITS_PER_LONG == 32 && - __builtin_constant_p(cpu_has_mips_r) && cpu_has_mips_r) { + __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) { __asm__( " .set push \n" " .set mips32 \n" @@ -644,7 +644,7 @@ static inline int fls(int x) { int r; - if (__builtin_constant_p(cpu_has_mips_r) && cpu_has_mips_r) { + if (__builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) { __asm__("clz %0, %1" : "=r" (x) : "r" (x)); return 32 - x; diff --git a/arch/mips/include/asm/checksum.h b/arch/mips/include/asm/checksum.h index 290485ac540..f2f7c6c264d 100644 --- a/arch/mips/include/asm/checksum.h +++ b/arch/mips/include/asm/checksum.h @@ -40,7 +40,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr) { - might_sleep(); + might_fault(); return __csum_partial_copy_user((__force void *)src, dst, len, sum, err_ptr); } @@ -53,7 +53,7 @@ static inline __wsum csum_and_copy_to_user(const void *src, void __user *dst, int len, __wsum sum, int *err_ptr) { - might_sleep(); + might_fault(); if (access_ok(VERIFY_WRITE, dst, len)) return __csum_partial_copy_user(src, (__force void *)dst, len, sum, err_ptr); diff --git a/arch/mips/include/asm/compat.h b/arch/mips/include/asm/compat.h index 6c5b40905dd..f58aed354bf 100644 --- a/arch/mips/include/asm/compat.h +++ b/arch/mips/include/asm/compat.h @@ -3,7 +3,6 @@ /* * Architecture specific compatibility types */ -#include #include #include #include diff --git a/arch/mips/include/asm/cpu-features.h 
b/arch/mips/include/asm/cpu-features.h index a0d14f85b78..c0047f86133 100644 --- a/arch/mips/include/asm/cpu-features.h +++ b/arch/mips/include/asm/cpu-features.h @@ -147,6 +147,15 @@ #define cpu_has_mips_r (cpu_has_mips32r1 | cpu_has_mips32r2 | \ cpu_has_mips64r1 | cpu_has_mips64r2) +/* + * MIPS32, MIPS64, VR5500, IDT32332, IDT32334 and maybe a few other + * pre-MIPS32/MIPS53 processors have CLO, CLZ. For 64-bit kernels + * cpu_has_clo_clz also indicates the availability of DCLO and DCLZ. + */ +# ifndef cpu_has_clo_clz +# define cpu_has_clo_clz cpu_has_mips_r +# endif + #ifndef cpu_has_dsp #define cpu_has_dsp (cpu_data[0].ases & MIPS_ASE_DSP) #endif diff --git a/arch/mips/include/asm/cpu-info.h b/arch/mips/include/asm/cpu-info.h index 744cd8fb107..126044308de 100644 --- a/arch/mips/include/asm/cpu-info.h +++ b/arch/mips/include/asm/cpu-info.h @@ -39,8 +39,8 @@ struct cache_desc { #define MIPS_CACHE_PINDEX 0x00000020 /* Physically indexed cache */ struct cpuinfo_mips { - unsigned long udelay_val; - unsigned long asid_cache; + unsigned int udelay_val; + unsigned int asid_cache; /* * Capability and feature descriptor structure for MIPS CPU diff --git a/arch/mips/include/asm/delay.h b/arch/mips/include/asm/delay.h index b0bccd2c4ed..a07e51b2be1 100644 --- a/arch/mips/include/asm/delay.h +++ b/arch/mips/include/asm/delay.h @@ -11,94 +11,12 @@ #ifndef _ASM_DELAY_H #define _ASM_DELAY_H -#include -#include +extern void __delay(unsigned int loops); +extern void __ndelay(unsigned int ns); +extern void __udelay(unsigned int us); -#include -#include - -static inline void __delay(unsigned long loops) -{ - if (sizeof(long) == 4) - __asm__ __volatile__ ( - " .set noreorder \n" - " .align 3 \n" - "1: bnez %0, 1b \n" - " subu %0, 1 \n" - " .set reorder \n" - : "=r" (loops) - : "0" (loops)); - else if (sizeof(long) == 8 && !DADDI_WAR) - __asm__ __volatile__ ( - " .set noreorder \n" - " .align 3 \n" - "1: bnez %0, 1b \n" - " dsubu %0, 1 \n" - " .set reorder \n" - : "=r" (loops) - : "0" (loops)); - else if (sizeof(long) == 8 && DADDI_WAR) - __asm__ __volatile__ ( - " .set noreorder \n" - " .align 3 \n" - "1: bnez %0, 1b \n" - " dsubu %0, %2 \n" - " .set reorder \n" - : "=r" (loops) - : "0" (loops), "r" (1)); -} - - -/* - * Division by multiplication: you don't have to worry about - * loss of precision. - * - * Use only for very small delays ( < 1 msec). Should probably use a - * lookup table, really, as the multiplications take much too long with - * short delays. This is a "reasonable" implementation, though (and the - * first constant multiplications gets optimized away if the delay is - * a constant) - */ - -static inline void __udelay(unsigned long usecs, unsigned long lpj) -{ - unsigned long hi, lo; - - /* - * The rates of 128 is rounded wrongly by the catchall case - * for 64-bit. Excessive precission? Probably ... 
- */ -#if defined(CONFIG_64BIT) && (HZ == 128) - usecs *= 0x0008637bd05af6c7UL; /* 2**64 / (1000000 / HZ) */ -#elif defined(CONFIG_64BIT) - usecs *= (0x8000000000000000UL / (500000 / HZ)); -#else /* 32-bit junk follows here */ - usecs *= (unsigned long) (((0x8000000000000000ULL / (500000 / HZ)) + - 0x80000000ULL) >> 32); -#endif - - if (sizeof(long) == 4) - __asm__("multu\t%2, %3" - : "=h" (usecs), "=l" (lo) - : "r" (usecs), "r" (lpj) - : GCC_REG_ACCUM); - else if (sizeof(long) == 8 && !R4000_WAR) - __asm__("dmultu\t%2, %3" - : "=h" (usecs), "=l" (lo) - : "r" (usecs), "r" (lpj) - : GCC_REG_ACCUM); - else if (sizeof(long) == 8 && R4000_WAR) - __asm__("dmultu\t%3, %4\n\tmfhi\t%0" - : "=r" (usecs), "=h" (hi), "=l" (lo) - : "r" (usecs), "r" (lpj) - : GCC_REG_ACCUM); - - __delay(usecs); -} - -#define __udelay_val cpu_data[raw_smp_processor_id()].udelay_val - -#define udelay(usecs) __udelay((usecs), __udelay_val) +#define ndelay(ns) __udelay(ns) +#define udelay(us) __udelay(us) /* make sure "usecs *= ..." in udelay do not overflow. */ #if HZ >= 1000 diff --git a/arch/mips/include/asm/div64.h b/arch/mips/include/asm/div64.h index d1d699105c1..dc5ea573644 100644 --- a/arch/mips/include/asm/div64.h +++ b/arch/mips/include/asm/div64.h @@ -6,105 +6,63 @@ * License. See the file "COPYING" in the main directory of this archive * for more details. */ -#ifndef _ASM_DIV64_H -#define _ASM_DIV64_H +#ifndef __ASM_DIV64_H +#define __ASM_DIV64_H + +#include + +#if BITS_PER_LONG == 64 #include -#if (_MIPS_SZLONG == 32) - -#include - /* * No traps on overflows for any of these... */ -#define do_div64_32(res, high, low, base) ({ \ - unsigned long __quot32, __mod32; \ - unsigned long __cf, __tmp, __tmp2, __i; \ - \ - __asm__(".set push\n\t" \ - ".set noat\n\t" \ - ".set noreorder\n\t" \ - "move %2, $0\n\t" \ - "move %3, $0\n\t" \ - "b 1f\n\t" \ - " li %4, 0x21\n" \ - "0:\n\t" \ - "sll $1, %0, 0x1\n\t" \ - "srl %3, %0, 0x1f\n\t" \ - "or %0, $1, %5\n\t" \ - "sll %1, %1, 0x1\n\t" \ - "sll %2, %2, 0x1\n" \ - "1:\n\t" \ - "bnez %3, 2f\n\t" \ - " sltu %5, %0, %z6\n\t" \ - "bnez %5, 3f\n" \ - "2:\n\t" \ - " addiu %4, %4, -1\n\t" \ - "subu %0, %0, %z6\n\t" \ - "addiu %2, %2, 1\n" \ - "3:\n\t" \ - "bnez %4, 0b\n\t" \ - " srl %5, %1, 0x1f\n\t" \ - ".set pop" \ - : "=&r" (__mod32), "=&r" (__tmp), \ - "=&r" (__quot32), "=&r" (__cf), \ - "=&r" (__i), "=&r" (__tmp2) \ - : "Jr" (base), "0" (high), "1" (low)); \ - \ - (res) = __quot32; \ - __mod32; }) +#define __div64_32(n, base) \ +({ \ + unsigned long __cf, __tmp, __tmp2, __i; \ + unsigned long __quot32, __mod32; \ + unsigned long __high, __low; \ + unsigned long long __n; \ + \ + __high = *__n >> 32; \ + __low = __n; \ + __asm__( \ + " .set push \n" \ + " .set noat \n" \ + " .set noreorder \n" \ + " move %2, $0 \n" \ + " move %3, $0 \n" \ + " b 1f \n" \ + " li %4, 0x21 \n" \ + "0: \n" \ + " sll $1, %0, 0x1 \n" \ + " srl %3, %0, 0x1f \n" \ + " or %0, $1, %5 \n" \ + " sll %1, %1, 0x1 \n" \ + " sll %2, %2, 0x1 \n" \ + "1: \n" \ + " bnez %3, 2f \n" \ + " sltu %5, %0, %z6 \n" \ + " bnez %5, 3f \n" \ + "2: \n" \ + " addiu %4, %4, -1 \n" \ + " subu %0, %0, %z6 \n" \ + " addiu %2, %2, 1 \n" \ + "3: \n" \ + " bnez %4, 0b\n\t" \ + " srl %5, %1, 0x1f\n\t" \ + " .set pop" \ + : "=&r" (__mod32), "=&r" (__tmp), \ + "=&r" (__quot32), "=&r" (__cf), \ + "=&r" (__i), "=&r" (__tmp2) \ + : "Jr" (base), "0" (__high), "1" (__low)); \ + \ + (__n) = __quot32; \ + __mod32; \ +}) -#define do_div(n, base) ({ \ - unsigned long long __quot; \ - unsigned long __mod; \ - unsigned long long __div; \ - 
unsigned long __upper, __low, __high, __base; \ - \ - __div = (n); \ - __base = (base); \ - \ - __high = __div >> 32; \ - __low = __div; \ - __upper = __high; \ - \ - if (__high) \ - __asm__("divu $0, %z2, %z3" \ - : "=h" (__upper), "=l" (__high) \ - : "Jr" (__high), "Jr" (__base) \ - : GCC_REG_ACCUM); \ - \ - __mod = do_div64_32(__low, __upper, __low, __base); \ - \ - __quot = __high; \ - __quot = __quot << 32 | __low; \ - (n) = __quot; \ - __mod; }) +#endif /* BITS_PER_LONG == 64 */ -#endif /* (_MIPS_SZLONG == 32) */ - -#if (_MIPS_SZLONG == 64) - -/* - * Hey, we're already 64-bit, no - * need to play games.. - */ -#define do_div(n, base) ({ \ - unsigned long __quot; \ - unsigned int __mod; \ - unsigned long __div; \ - unsigned int __base; \ - \ - __div = (n); \ - __base = (base); \ - \ - __mod = __div % __base; \ - __quot = __div / __base; \ - \ - (n) = __quot; \ - __mod; }) - -#endif /* (_MIPS_SZLONG == 64) */ - -#endif /* _ASM_DIV64_H */ +#endif /* __ASM_DIV64_H */ diff --git a/arch/mips/include/asm/dma-mapping.h b/arch/mips/include/asm/dma-mapping.h index c64afb40cd0..d16afddb09a 100644 --- a/arch/mips/include/asm/dma-mapping.h +++ b/arch/mips/include/asm/dma-mapping.h @@ -24,8 +24,13 @@ extern int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction direction); extern dma_addr_t dma_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction direction); -extern void dma_unmap_page(struct device *dev, dma_addr_t dma_address, - size_t size, enum dma_data_direction direction); + +static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address, + size_t size, enum dma_data_direction direction) +{ + dma_unmap_single(dev, dma_address, size, direction); +} + extern void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries, enum dma_data_direction direction); extern void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, diff --git a/arch/mips/include/asm/fixmap.h b/arch/mips/include/asm/fixmap.h index 9cc8522a394..0f5caa1307f 100644 --- a/arch/mips/include/asm/fixmap.h +++ b/arch/mips/include/asm/fixmap.h @@ -108,6 +108,9 @@ static inline unsigned long virt_to_fix(const unsigned long vaddr) return __virt_to_fix(vaddr); } +#define kmap_get_fixmap_pte(vaddr) \ + pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), (vaddr)) + /* * Called from pgtable_init() */ diff --git a/arch/mips/include/asm/hazards.h b/arch/mips/include/asm/hazards.h index a12d971db4f..0eaf77ffbc4 100644 --- a/arch/mips/include/asm/hazards.h +++ b/arch/mips/include/asm/hazards.h @@ -138,8 +138,9 @@ do { \ __instruction_hazard(); \ } while (0) -#elif defined(CONFIG_CPU_R10000) || defined(CONFIG_CPU_CAVIUM_OCTEON) || \ - defined(CONFIG_CPU_R5500) || defined(CONFIG_MACH_ALCHEMY) +#elif defined(CONFIG_MACH_ALCHEMY) || defined(CONFIG_CPU_CAVIUM_OCTEON) || \ + defined(CONFIG_CPU_LOONGSON2) || defined(CONFIG_CPU_R10000) || \ + defined(CONFIG_CPU_R5500) /* * R10000 rocks - all hazards handled in hardware, so this becomes a nobrainer. 
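
Aside on the asm/div64.h rework shown above: the hand-written do_div64_32()/do_div() assembly is replaced by a __div64_32() helper selected on BITS_PER_LONG, and the contract it has to honour is the generic kernel one — divide a 64-bit dividend in place by a 32-bit base and return the 32-bit remainder. A minimal portable sketch of that contract, with a hypothetical helper name and plain C arithmetic standing in for the MIPS shift/subtract assembly, purely to make the in/out behaviour explicit:

#include <stdint.h>

/* Hypothetical C equivalent of the __div64_32()/do_div() contract:
 * *n is divided by base in place, the 32-bit remainder is returned.
 * The real MIPS version does this with hand-scheduled assembly. */
static inline uint32_t sketch_div64_32(uint64_t *n, uint32_t base)
{
	uint32_t rem = (uint32_t)(*n % base);	/* take the remainder first ... */

	*n /= base;				/* ... then the quotient, in place */
	return rem;
}

For example, sketch_div64_32(&ns, 1000000000) leaves whole seconds in ns and returns the leftover nanoseconds, which is exactly how do_div() callers use the macro.
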
diff --git a/arch/mips/include/asm/highmem.h b/arch/mips/include/asm/highmem.h index 4374ab2adc7..25adfb02923 100644 --- a/arch/mips/include/asm/highmem.h +++ b/arch/mips/include/asm/highmem.h @@ -30,8 +30,6 @@ /* declarations for highmem.c */ extern unsigned long highstart_pfn, highend_pfn; -extern pte_t *kmap_pte; -extern pgprot_t kmap_prot; extern pte_t *pkmap_page_table; /* @@ -62,6 +60,10 @@ extern struct page *__kmap_atomic_to_page(void *ptr); #define flush_cache_kmaps() flush_cache_all() +extern void kmap_init(void); + +#define kmap_prot PAGE_KERNEL + #endif /* __KERNEL__ */ #endif /* _ASM_HIGHMEM_H */ diff --git a/arch/mips/include/asm/ioctl.h b/arch/mips/include/asm/ioctl.h index 85067e248a8..916163401b2 100644 --- a/arch/mips/include/asm/ioctl.h +++ b/arch/mips/include/asm/ioctl.h @@ -60,12 +60,16 @@ ((nr) << _IOC_NRSHIFT) | \ ((size) << _IOC_SIZESHIFT)) +#ifdef __KERNEL__ /* provoke compile error for invalid uses of size argument */ extern unsigned int __invalid_size_argument_for_IOC; #define _IOC_TYPECHECK(t) \ ((sizeof(t) == sizeof(t[1]) && \ sizeof(t) < (1 << _IOC_SIZEBITS)) ? \ sizeof(t) : __invalid_size_argument_for_IOC) +#else +#define _IOC_TYPECHECK(t) (sizeof(t)) +#endif /* used to create numbers */ #define _IO(type, nr) _IOC(_IOC_NONE, (type), (nr), 0) diff --git a/arch/mips/include/asm/mach-au1x00/au1000.h b/arch/mips/include/asm/mach-au1x00/au1000.h index 62f91f50b5b..854e95f1b07 100644 --- a/arch/mips/include/asm/mach-au1x00/au1000.h +++ b/arch/mips/include/asm/mach-au1x00/au1000.h @@ -715,7 +715,7 @@ enum soc_au1500_ints { #ifdef CONFIG_SOC_AU1100 enum soc_au1100_ints { AU1100_FIRST_INT = MIPS_CPU_IRQ_BASE + 8, - AU1100_UART0_INT, + AU1100_UART0_INT = AU1100_FIRST_INT, AU1100_UART1_INT, AU1100_SD_INT, AU1100_UART3_INT, @@ -902,8 +902,8 @@ enum soc_au1200_ints { AU1000_RTC_MATCH0_INT, AU1000_RTC_MATCH1_INT, AU1000_RTC_MATCH2_INT, - - AU1200_NAND_INT = AU1200_FIRST_INT + 23, + AU1200_GPIO_203, + AU1200_NAND_INT, AU1200_GPIO_204, AU1200_GPIO_205, AU1200_GPIO_206, diff --git a/arch/mips/include/asm/mach-au1x00/au1xxx_ide.h b/arch/mips/include/asm/mach-au1x00/au1xxx_ide.h index 60638b8969b..5656c72de6d 100644 --- a/arch/mips/include/asm/mach-au1x00/au1xxx_ide.h +++ b/arch/mips/include/asm/mach-au1x00/au1xxx_ide.h @@ -46,20 +46,6 @@ #define CONFIG_BLK_DEV_IDE_AU1XXX_BURSTABLE_ON 0 #endif -#ifdef CONFIG_PM -/* - * This will enable the device to be powered up when write() or read() - * is called. If this is not defined, the driver will return -EBUSY. - */ -#define WAKE_ON_ACCESS 1 - -typedef struct { - spinlock_t lock; /* Used to block on state transitions */ - au1xxx_power_dev_t *dev; /* Power Managers device structure */ - unsigned stopped; /* Used to signal device is stopped */ -} pm_state; -#endif - typedef struct { u32 tx_dev_id, rx_dev_id, target_dev_id; u32 tx_chan, rx_chan; @@ -72,9 +58,6 @@ typedef struct { #endif int irq; u32 regbase; -#ifdef CONFIG_PM - pm_state pm; -#endif } _auide_hwif; /******************************************************************************/ diff --git a/arch/mips/include/asm/mach-lemote/cpu-feature-overrides.h b/arch/mips/include/asm/mach-lemote/cpu-feature-overrides.h new file mode 100644 index 00000000000..550a10dc9db --- /dev/null +++ b/arch/mips/include/asm/mach-lemote/cpu-feature-overrides.h @@ -0,0 +1,59 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ * + * Copyright (C) 2009 Wu Zhangjin + * Copyright (C) 2009 Philippe Vachon + * Copyright (C) 2009 Zhang Le + * + * reference: /proc/cpuinfo, + * arch/mips/kernel/cpu-probe.c(cpu_probe_legacy), + * arch/mips/kernel/proc.c(show_cpuinfo), + * loongson2f user manual. + */ + +#ifndef __ASM_MACH_LEMOTE_CPU_FEATURE_OVERRIDES_H +#define __ASM_MACH_LEMOTE_CPU_FEATURE_OVERRIDES_H + +#define cpu_dcache_line_size() 32 +#define cpu_icache_line_size() 32 +#define cpu_scache_line_size() 32 + + +#define cpu_has_32fpr 1 +#define cpu_has_3k_cache 0 +#define cpu_has_4k_cache 1 +#define cpu_has_4kex 1 +#define cpu_has_64bits 1 +#define cpu_has_cache_cdex_p 0 +#define cpu_has_cache_cdex_s 0 +#define cpu_has_counter 1 +#define cpu_has_dc_aliases 1 +#define cpu_has_divec 0 +#define cpu_has_dsp 0 +#define cpu_has_ejtag 0 +#define cpu_has_fpu 1 +#define cpu_has_ic_fills_f_dc 0 +#define cpu_has_inclusive_pcaches 1 +#define cpu_has_llsc 1 +#define cpu_has_mcheck 0 +#define cpu_has_mdmx 0 +#define cpu_has_mips16 0 +#define cpu_has_mips32r1 0 +#define cpu_has_mips32r2 0 +#define cpu_has_mips3d 0 +#define cpu_has_mips64r1 0 +#define cpu_has_mips64r2 0 +#define cpu_has_mipsmt 0 +#define cpu_has_prefetch 0 +#define cpu_has_smartmips 0 +#define cpu_has_tlb 1 +#define cpu_has_tx39_cache 0 +#define cpu_has_userlocal 0 +#define cpu_has_vce 0 +#define cpu_has_vtag_icache 0 +#define cpu_has_watch 1 +#define cpu_icache_snoops_remote_store 1 + +#endif /* __ASM_MACH_LEMOTE_CPU_FEATURE_OVERRIDES_H */ diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h index 526f327475c..32ef8bec5c8 100644 --- a/arch/mips/include/asm/mipsregs.h +++ b/arch/mips/include/asm/mipsregs.h @@ -184,12 +184,19 @@ #else #define PM_4K 0x00000000 +#define PM_8K 0x00002000 #define PM_16K 0x00006000 +#define PM_32K 0x0000e000 #define PM_64K 0x0001e000 +#define PM_128K 0x0003e000 #define PM_256K 0x0007e000 +#define PM_512K 0x000fe000 #define PM_1M 0x001fe000 +#define PM_2M 0x003fe000 #define PM_4M 0x007fe000 +#define PM_8M 0x00ffe000 #define PM_16M 0x01ffe000 +#define PM_32M 0x03ffe000 #define PM_64M 0x07ffe000 #define PM_256M 0x1fffe000 #define PM_1G 0x7fffe000 @@ -201,8 +208,12 @@ */ #ifdef CONFIG_PAGE_SIZE_4KB #define PM_DEFAULT_MASK PM_4K +#elif defined(CONFIG_PAGE_SIZE_8KB) +#define PM_DEFAULT_MASK PM_8K #elif defined(CONFIG_PAGE_SIZE_16KB) #define PM_DEFAULT_MASK PM_16K +#elif defined(CONFIG_PAGE_SIZE_32KB) +#define PM_DEFAULT_MASK PM_32K #elif defined(CONFIG_PAGE_SIZE_64KB) #define PM_DEFAULT_MASK PM_64K #else @@ -717,8 +728,8 @@ do { \ ".set\tmips64\n\t" \ "dmfc0\t%M0, " #source "\n\t" \ "dsll\t%L0, %M0, 32\n\t" \ - "dsrl\t%M0, %M0, 32\n\t" \ - "dsrl\t%L0, %L0, 32\n\t" \ + "dsra\t%M0, %M0, 32\n\t" \ + "dsra\t%L0, %L0, 32\n\t" \ ".set\tmips0" \ : "=r" (__val)); \ else \ @@ -726,8 +737,8 @@ do { \ ".set\tmips64\n\t" \ "dmfc0\t%M0, " #source ", " #sel "\n\t" \ "dsll\t%L0, %M0, 32\n\t" \ - "dsrl\t%M0, %M0, 32\n\t" \ - "dsrl\t%L0, %L0, 32\n\t" \ + "dsra\t%M0, %M0, 32\n\t" \ + "dsra\t%L0, %L0, 32\n\t" \ ".set\tmips0" \ : "=r" (__val)); \ local_irq_restore(__flags); \ @@ -1484,14 +1495,15 @@ static inline unsigned int \ set_c0_##name(unsigned int set) \ { \ unsigned int res; \ + unsigned int new; \ unsigned int omt; \ unsigned long flags; \ \ local_irq_save(flags); \ omt = __dmt(); \ res = read_c0_##name(); \ - res |= set; \ - write_c0_##name(res); \ + new = res | set; \ + write_c0_##name(new); \ __emt(omt); \ local_irq_restore(flags); \ \ @@ -1502,14 +1514,15 @@ static inline unsigned int \ clear_c0_##name(unsigned int clear) \ { 
\ unsigned int res; \ + unsigned int new; \ unsigned int omt; \ unsigned long flags; \ \ local_irq_save(flags); \ omt = __dmt(); \ res = read_c0_##name(); \ - res &= ~clear; \ - write_c0_##name(res); \ + new = res & ~clear; \ + write_c0_##name(new); \ __emt(omt); \ local_irq_restore(flags); \ \ @@ -1517,9 +1530,10 @@ clear_c0_##name(unsigned int clear) \ } \ \ static inline unsigned int \ -change_c0_##name(unsigned int change, unsigned int new) \ +change_c0_##name(unsigned int change, unsigned int newbits) \ { \ unsigned int res; \ + unsigned int new; \ unsigned int omt; \ unsigned long flags; \ \ @@ -1527,9 +1541,9 @@ change_c0_##name(unsigned int change, unsigned int new) \ \ omt = __dmt(); \ res = read_c0_##name(); \ - res &= ~change; \ - res |= (new & change); \ - write_c0_##name(res); \ + new = res & ~change; \ + new |= (newbits & change); \ + write_c0_##name(new); \ __emt(omt); \ local_irq_restore(flags); \ \ diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h index fe7a88ea066..9f946e4ca05 100644 --- a/arch/mips/include/asm/page.h +++ b/arch/mips/include/asm/page.h @@ -23,6 +23,9 @@ #ifdef CONFIG_PAGE_SIZE_16KB #define PAGE_SHIFT 14 #endif +#ifdef CONFIG_PAGE_SIZE_32KB +#define PAGE_SHIFT 15 +#endif #ifdef CONFIG_PAGE_SIZE_64KB #define PAGE_SHIFT 16 #endif diff --git a/arch/mips/include/asm/pgtable-64.h b/arch/mips/include/asm/pgtable-64.h index 943515f0ef8..4ed9d1bba2b 100644 --- a/arch/mips/include/asm/pgtable-64.h +++ b/arch/mips/include/asm/pgtable-64.h @@ -83,6 +83,12 @@ #define PMD_ORDER 0 #define PTE_ORDER 0 #endif +#ifdef CONFIG_PAGE_SIZE_32KB +#define PGD_ORDER 0 +#define PUD_ORDER aieeee_attempt_to_allocate_pud +#define PMD_ORDER 0 +#define PTE_ORDER 0 +#endif #ifdef CONFIG_PAGE_SIZE_64KB #define PGD_ORDER 0 #define PUD_ORDER aieeee_attempt_to_allocate_pud diff --git a/arch/mips/include/asm/sn/addrs.h b/arch/mips/include/asm/sn/addrs.h index fec9bdd3491..3a56d90abfa 100644 --- a/arch/mips/include/asm/sn/addrs.h +++ b/arch/mips/include/asm/sn/addrs.h @@ -359,11 +359,11 @@ TO_NODE_UNCAC((nasid), LAUNCH_OFFSET(nasid, slice)) #define LAUNCH_SIZE(nasid) KLD_LAUNCH(nasid)->size -#define NMI_OFFSET(nasid, slice) \ +#define SN_NMI_OFFSET(nasid, slice) \ (KLD_NMI(nasid)->offset + \ KLD_NMI(nasid)->stride * (slice)) #define NMI_ADDR(nasid, slice) \ - TO_NODE_UNCAC((nasid), NMI_OFFSET(nasid, slice)) + TO_NODE_UNCAC((nasid), SN_NMI_OFFSET(nasid, slice)) #define NMI_SIZE(nasid) KLD_NMI(nasid)->size #define KLCONFIG_OFFSET(nasid) KLD_KLCONFIG(nasid)->offset diff --git a/arch/mips/include/asm/sn/nmi.h b/arch/mips/include/asm/sn/nmi.h index 6b7b0b5f372..1af49897d4e 100644 --- a/arch/mips/include/asm/sn/nmi.h +++ b/arch/mips/include/asm/sn/nmi.h @@ -3,13 +3,13 @@ * License. See the file "COPYING" in the main directory of this archive * for more details. * + * Derived from IRIX , Revision 1.5. + * * Copyright (C) 1992 - 1997 Silicon Graphics, Inc. 
*/ #ifndef __ASM_SN_NMI_H #define __ASM_SN_NMI_H -#ident "$Revision: 1.5 $" - #include /* diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h index 676aa2ae191..143a48136a4 100644 --- a/arch/mips/include/asm/thread_info.h +++ b/arch/mips/include/asm/thread_info.h @@ -75,6 +75,9 @@ register struct thread_info *__current_thread_info __asm__("$28"); #ifdef CONFIG_PAGE_SIZE_16KB #define THREAD_SIZE_ORDER (0) #endif +#ifdef CONFIG_PAGE_SIZE_32KB +#define THREAD_SIZE_ORDER (0) +#endif #ifdef CONFIG_PAGE_SIZE_64KB #define THREAD_SIZE_ORDER (0) #endif diff --git a/arch/mips/include/asm/time.h b/arch/mips/include/asm/time.h index 38a30d2ee95..df6a430de5e 100644 --- a/arch/mips/include/asm/time.h +++ b/arch/mips/include/asm/time.h @@ -57,7 +57,11 @@ extern int r4k_clockevent_init(void); static inline int mips_clockevent_init(void) { -#ifdef CONFIG_CEVT_R4K +#ifdef CONFIG_MIPS_MT_SMTC + extern int smtc_clockevent_init(void); + + return smtc_clockevent_init(); +#elif defined(CONFIG_CEVT_R4K) return r4k_clockevent_init(); #else return -ENXIO; diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h index 09ff5bb1744..c2d53c18fd3 100644 --- a/arch/mips/include/asm/uaccess.h +++ b/arch/mips/include/asm/uaccess.h @@ -105,10 +105,20 @@ #define __access_mask get_fs().seg #define __access_ok(addr, size, mask) \ - (((signed long)((mask) & ((addr) | ((addr) + (size)) | __ua_size(size)))) == 0) +({ \ + unsigned long __addr = (unsigned long) (addr); \ + unsigned long __size = size; \ + unsigned long __mask = mask; \ + unsigned long __ok; \ + \ + __chk_user_ptr(addr); \ + __ok = (signed long)(__mask & (__addr | (__addr + __size) | \ + __ua_size(__size))); \ + __ok == 0; \ +}) #define access_ok(type, addr, size) \ - likely(__access_ok((unsigned long)(addr), (size), __access_mask)) + likely(__access_ok((addr), (size), __access_mask)) /* * put_user: - Write a simple value into user space. 
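
Aside on the uaccess.h hunk above: __access_ok() becomes a statement expression that copies its arguments into locals (so addr and size are evaluated only once) and adds __chk_user_ptr(), while the following hunks replace might_sleep() with might_fault() and only emit it once access_ok() has succeeded. A minimal sketch of the check-then-copy pattern those annotations police — hypothetical function name, assuming the 2.6.30-era access_ok(VERIFY_WRITE, ...) and copy_to_user() interfaces:

#include <linux/kernel.h>	/* might_fault() */
#include <linux/errno.h>
#include <linux/uaccess.h>	/* access_ok(), copy_to_user() */

/* Hypothetical helper: copy a kernel buffer out to user space.
 * Returns 0 on success, -EFAULT if the range is bad or the copy faults. */
static int sketch_copy_out(void __user *dst, const void *src, size_t len)
{
	if (!access_ok(VERIFY_WRITE, dst, len))
		return -EFAULT;

	might_fault();	/* mirrors the annotation the patched copy_to_user() emits:
			 * the copy may sleep while faulting in user pages */

	/* copy_to_user() returns the number of bytes it could NOT copy */
	return copy_to_user(dst, src, len) ? -EFAULT : 0;
}
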
@@ -225,6 +235,7 @@ do { \ ({ \ int __gu_err; \ \ + __chk_user_ptr(ptr); \ __get_user_common((x), size, ptr); \ __gu_err; \ }) @@ -234,6 +245,7 @@ do { \ int __gu_err = -EFAULT; \ const __typeof__(*(ptr)) __user * __gu_ptr = (ptr); \ \ + might_fault(); \ if (likely(access_ok(VERIFY_READ, __gu_ptr, size))) \ __get_user_common((x), size, __gu_ptr); \ \ @@ -305,6 +317,7 @@ do { \ __typeof__(*(ptr)) __pu_val; \ int __pu_err = 0; \ \ + __chk_user_ptr(ptr); \ __pu_val = (x); \ switch (size) { \ case 1: __put_user_asm("sb", ptr); break; \ @@ -322,6 +335,7 @@ do { \ __typeof__(*(ptr)) __pu_val = (x); \ int __pu_err = -EFAULT; \ \ + might_fault(); \ if (likely(access_ok(VERIFY_WRITE, __pu_addr, size))) { \ switch (size) { \ case 1: __put_user_asm("sb", __pu_addr); break; \ @@ -696,10 +710,10 @@ extern size_t __copy_user(void *__to, const void *__from, size_t __n); const void *__cu_from; \ long __cu_len; \ \ - might_sleep(); \ __cu_to = (to); \ __cu_from = (from); \ __cu_len = (n); \ + might_fault(); \ __cu_len = __invoke_copy_to_user(__cu_to, __cu_from, __cu_len); \ __cu_len; \ }) @@ -752,13 +766,14 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n); const void *__cu_from; \ long __cu_len; \ \ - might_sleep(); \ __cu_to = (to); \ __cu_from = (from); \ __cu_len = (n); \ - if (access_ok(VERIFY_WRITE, __cu_to, __cu_len)) \ + if (access_ok(VERIFY_WRITE, __cu_to, __cu_len)) { \ + might_fault(); \ __cu_len = __invoke_copy_to_user(__cu_to, __cu_from, \ __cu_len); \ + } \ __cu_len; \ }) @@ -831,10 +846,10 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n); const void __user *__cu_from; \ long __cu_len; \ \ - might_sleep(); \ __cu_to = (to); \ __cu_from = (from); \ __cu_len = (n); \ + might_fault(); \ __cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \ __cu_len); \ __cu_len; \ @@ -862,17 +877,31 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n); const void __user *__cu_from; \ long __cu_len; \ \ - might_sleep(); \ __cu_to = (to); \ __cu_from = (from); \ __cu_len = (n); \ - if (access_ok(VERIFY_READ, __cu_from, __cu_len)) \ + if (access_ok(VERIFY_READ, __cu_from, __cu_len)) { \ + might_fault(); \ __cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \ __cu_len); \ + } \ __cu_len; \ }) -#define __copy_in_user(to, from, n) __copy_from_user(to, from, n) +#define __copy_in_user(to, from, n) \ +({ \ + void __user *__cu_to; \ + const void __user *__cu_from; \ + long __cu_len; \ + \ + __cu_to = (to); \ + __cu_from = (from); \ + __cu_len = (n); \ + might_fault(); \ + __cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \ + __cu_len); \ + __cu_len; \ +}) #define copy_in_user(to, from, n) \ ({ \ @@ -880,14 +909,15 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n); const void __user *__cu_from; \ long __cu_len; \ \ - might_sleep(); \ __cu_to = (to); \ __cu_from = (from); \ __cu_len = (n); \ if (likely(access_ok(VERIFY_READ, __cu_from, __cu_len) && \ - access_ok(VERIFY_WRITE, __cu_to, __cu_len))) \ + access_ok(VERIFY_WRITE, __cu_to, __cu_len))) { \ + might_fault(); \ __cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \ __cu_len); \ + } \ __cu_len; \ }) @@ -907,7 +937,7 @@ __clear_user(void __user *addr, __kernel_size_t size) { __kernel_size_t res; - might_sleep(); + might_fault(); __asm__ __volatile__( "move\t$4, %1\n\t" "move\t$5, $0\n\t" @@ -926,7 +956,7 @@ __clear_user(void __user *addr, __kernel_size_t size) void __user * __cl_addr = (addr); \ unsigned long __cl_size = (n); \ if 
(__cl_size && access_ok(VERIFY_WRITE, \ - ((unsigned long)(__cl_addr)), __cl_size)) \ + __cl_addr, __cl_size)) \ __cl_size = __clear_user(__cl_addr, __cl_size); \ __cl_size; \ }) @@ -956,7 +986,7 @@ __strncpy_from_user(char *__to, const char __user *__from, long __len) { long res; - might_sleep(); + might_fault(); __asm__ __volatile__( "move\t$4, %1\n\t" "move\t$5, %2\n\t" @@ -993,7 +1023,7 @@ strncpy_from_user(char *__to, const char __user *__from, long __len) { long res; - might_sleep(); + might_fault(); __asm__ __volatile__( "move\t$4, %1\n\t" "move\t$5, %2\n\t" @@ -1012,7 +1042,7 @@ static inline long __strlen_user(const char __user *s) { long res; - might_sleep(); + might_fault(); __asm__ __volatile__( "move\t$4, %1\n\t" __MODULE_JAL(__strlen_user_nocheck_asm) @@ -1042,7 +1072,7 @@ static inline long strlen_user(const char __user *s) { long res; - might_sleep(); + might_fault(); __asm__ __volatile__( "move\t$4, %1\n\t" __MODULE_JAL(__strlen_user_asm) @@ -1059,7 +1089,7 @@ static inline long __strnlen_user(const char __user *s, long n) { long res; - might_sleep(); + might_fault(); __asm__ __volatile__( "move\t$4, %1\n\t" "move\t$5, %2\n\t" @@ -1090,7 +1120,7 @@ static inline long strnlen_user(const char __user *s, long n) { long res; - might_sleep(); + might_fault(); __asm__ __volatile__( "move\t$4, %1\n\t" "move\t$5, %2\n\t" diff --git a/arch/mips/kernel/cevt-smtc.c b/arch/mips/kernel/cevt-smtc.c index 6d45e24db5b..df6f5bc6057 100644 --- a/arch/mips/kernel/cevt-smtc.c +++ b/arch/mips/kernel/cevt-smtc.c @@ -245,7 +245,7 @@ irqreturn_t c0_compare_interrupt(int irq, void *dev_id) } -int __cpuinit mips_clockevent_init(void) +int __cpuinit smtc_clockevent_init(void) { uint64_t mips_freq = mips_hpt_frequency; unsigned int cpu = smp_processor_id(); diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c index 26760cad8b6..e0a4ac18fa0 100644 --- a/arch/mips/kernel/proc.c +++ b/arch/mips/kernel/proc.c @@ -42,7 +42,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) seq_printf(m, fmt, __cpu_name[n], (version >> 4) & 0x0f, version & 0x0f, (fp_vers >> 4) & 0x0f, fp_vers & 0x0f); - seq_printf(m, "BogoMIPS\t\t: %lu.%02lu\n", + seq_printf(m, "BogoMIPS\t\t: %u.%02u\n", cpu_data[n].udelay_val / (500000/HZ), (cpu_data[n].udelay_val / (5000/HZ)) % 100); seq_printf(m, "wait instruction\t: %s\n", cpu_wait ? 
"yes" : "no"); diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S index c2c16ef9218..93cc672f452 100644 --- a/arch/mips/kernel/scall64-n32.S +++ b/arch/mips/kernel/scall64-n32.S @@ -405,8 +405,8 @@ EXPORT(sysn32_call_table) PTR sys_eventfd PTR sys_fallocate PTR sys_timerfd_create - PTR sys_timerfd_gettime /* 5285 */ - PTR sys_timerfd_settime + PTR compat_sys_timerfd_gettime /* 5285 */ + PTR compat_sys_timerfd_settime PTR sys_signalfd4 PTR sys_eventfd2 PTR sys_epoll_create1 diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S index 002fac27021..a5598b2339d 100644 --- a/arch/mips/kernel/scall64-o32.S +++ b/arch/mips/kernel/scall64-o32.S @@ -525,8 +525,8 @@ sys_call_table: PTR sys_eventfd PTR sys32_fallocate /* 4320 */ PTR sys_timerfd_create - PTR sys_timerfd_gettime - PTR sys_timerfd_settime + PTR compat_sys_timerfd_gettime + PTR compat_sys_timerfd_settime PTR compat_sys_signalfd4 PTR sys_eventfd2 /* 4325 */ PTR sys_epoll_create1 diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c index bf4c4a979ab..67bd626942a 100644 --- a/arch/mips/kernel/unaligned.c +++ b/arch/mips/kernel/unaligned.c @@ -482,19 +482,19 @@ fault: return; die_if_kernel("Unhandled kernel unaligned access", regs); - send_sig(SIGSEGV, current, 1); + force_sig(SIGSEGV, current); return; sigbus: die_if_kernel("Unhandled kernel unaligned access", regs); - send_sig(SIGBUS, current, 1); + force_sig(SIGBUS, current); return; sigill: die_if_kernel("Unhandled kernel unaligned access or invalid instruction", regs); - send_sig(SIGILL, current, 1); + force_sig(SIGILL, current); } asmlinkage void do_ade(struct pt_regs *regs) diff --git a/arch/mips/lib/Makefile b/arch/mips/lib/Makefile index c13c7ad2cda..2adead5a8a3 100644 --- a/arch/mips/lib/Makefile +++ b/arch/mips/lib/Makefile @@ -2,8 +2,8 @@ # Makefile for MIPS-specific library files.. # -lib-y += csum_partial.o memcpy.o memcpy-inatomic.o memset.o strlen_user.o \ - strncpy_user.o strnlen_user.o uncached.o +lib-y += csum_partial.o delay.o memcpy.o memcpy-inatomic.o memset.o \ + strlen_user.o strncpy_user.o strnlen_user.o uncached.o obj-y += iomap.o obj-$(CONFIG_PCI) += iomap-pci.o diff --git a/arch/mips/lib/delay.c b/arch/mips/lib/delay.c new file mode 100644 index 00000000000..f69c6b569eb --- /dev/null +++ b/arch/mips/lib/delay.c @@ -0,0 +1,56 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1994 by Waldorf Electronics + * Copyright (C) 1995 - 2000, 01, 03 by Ralf Baechle + * Copyright (C) 1999, 2000 Silicon Graphics, Inc. + * Copyright (C) 2007 Maciej W. Rozycki + */ +#include +#include +#include + +#include +#include + +inline void __delay(unsigned int loops) +{ + __asm__ __volatile__ ( + " .set noreorder \n" + " .align 3 \n" + "1: bnez %0, 1b \n" + " subu %0, 1 \n" + " .set reorder \n" + : "=r" (loops) + : "0" (loops)); +} +EXPORT_SYMBOL(__delay); + +/* + * Division by multiplication: you don't have to worry about + * loss of precision. + * + * Use only for very small delays ( < 1 msec). Should probably use a + * lookup table, really, as the multiplications take much too long with + * short delays. 
This is a "reasonable" implementation, though (and the + * first constant multiplication gets optimized away if the delay is + * a constant) + */ + +void __udelay(unsigned long us) +{ + unsigned int lpj = current_cpu_data.udelay_val; + + __delay((us * 0x000010c7ull * HZ * lpj) >> 32); +} +EXPORT_SYMBOL(__udelay); + +void __ndelay(unsigned long ns) +{ + unsigned int lpj = current_cpu_data.udelay_val; + + __delay((ns * 0x00000005ull * HZ * lpj) >> 32); +} +EXPORT_SYMBOL(__ndelay); diff --git a/arch/mips/lib/dump_tlb.c b/arch/mips/lib/dump_tlb.c index 779821cd54a..3f69725556a 100644 --- a/arch/mips/lib/dump_tlb.c +++ b/arch/mips/lib/dump_tlb.c @@ -19,6 +19,15 @@ static inline const char *msk2str(unsigned int mask) case PM_16K: return "16kb"; case PM_64K: return "64kb"; case PM_256K: return "256kb"; +#ifdef CONFIG_CPU_CAVIUM_OCTEON + case PM_8K: return "8kb"; + case PM_32K: return "32kb"; + case PM_128K: return "128kb"; + case PM_512K: return "512kb"; + case PM_2M: return "2Mb"; + case PM_8M: return "8Mb"; + case PM_32M: return "32Mb"; +#endif #ifndef CONFIG_CPU_VR41XX case PM_1M: return "1Mb"; case PM_4M: return "4Mb"; diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c index 58d9075e86f..171951d2305 100644 --- a/arch/mips/mm/c-r4k.c +++ b/arch/mips/mm/c-r4k.c @@ -1041,7 +1041,7 @@ static void __cpuinit probe_pcache(void) printk("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n", icache_size >> 10, - cpu_has_vtag_icache ? "VIVT" : "VIPT", + c->icache.flags & MIPS_CACHE_VTAG ? "VIVT" : "VIPT", way_string[c->icache.ways], c->icache.linesz); printk("Primary data cache %ldkB, %s, %s, %s, linesize %d bytes\n", diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c index bed56f1ac83..4fdb7f5216b 100644 --- a/arch/mips/mm/dma-default.c +++ b/arch/mips/mm/dma-default.c @@ -209,7 +209,7 @@ dma_addr_t dma_map_page(struct device *dev, struct page *page, unsigned long addr; addr = (unsigned long) page_address(page) + offset; - dma_cache_wback_inv(addr, size); + __dma_sync(addr, size, direction); } return plat_map_dma_mem_page(dev, page) + offset; @@ -217,23 +217,6 @@ dma_addr_t dma_map_page(struct device *dev, struct page *page, EXPORT_SYMBOL(dma_map_page); -void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size, - enum dma_data_direction direction) -{ - BUG_ON(direction == DMA_NONE); - - if (!plat_device_is_coherent(dev) && direction != DMA_TO_DEVICE) { - unsigned long addr; - - addr = dma_addr_to_virt(dma_address); - dma_cache_wback_inv(addr, size); - } - - plat_unmap_dma_mem(dev, dma_address); -} - -EXPORT_SYMBOL(dma_unmap_page); - void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries, enum dma_data_direction direction) { diff --git a/arch/mips/mm/highmem.c b/arch/mips/mm/highmem.c index 4481656d106..2b1309b2580 100644 --- a/arch/mips/mm/highmem.c +++ b/arch/mips/mm/highmem.c @@ -1,7 +1,12 @@ #include #include +#include #include +static pte_t *kmap_pte; + +unsigned long highstart_pfn, highend_pfn; + void *__kmap(struct page *page) { void *addr; @@ -14,6 +19,7 @@ void *__kmap(struct page *page) return addr; } +EXPORT_SYMBOL(__kmap); void __kunmap(struct page *page) { @@ -22,6 +28,7 @@ void __kunmap(struct page *page) return; kunmap_high(page); } +EXPORT_SYMBOL(__kunmap); /* * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because @@ -48,11 +55,12 @@ void *__kmap_atomic(struct page *page, enum km_type type) #ifdef CONFIG_DEBUG_HIGHMEM BUG_ON(!pte_none(*(kmap_pte - idx))); #endif - set_pte(kmap_pte-idx,
mk_pte(page, kmap_prot)); + set_pte(kmap_pte-idx, mk_pte(page, PAGE_KERNEL)); local_flush_tlb_one((unsigned long)vaddr); return (void*) vaddr; } +EXPORT_SYMBOL(__kmap_atomic); void __kunmap_atomic(void *kvaddr, enum km_type type) { @@ -77,6 +85,7 @@ void __kunmap_atomic(void *kvaddr, enum km_type type) pagefault_enable(); } +EXPORT_SYMBOL(__kunmap_atomic); /* * This is the same as kmap_atomic() but can map memory that doesn't @@ -92,7 +101,7 @@ void *kmap_atomic_pfn(unsigned long pfn, enum km_type type) debug_kmap_atomic(type); idx = type + KM_TYPE_NR*smp_processor_id(); vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); - set_pte(kmap_pte-idx, pfn_pte(pfn, kmap_prot)); + set_pte(kmap_pte-idx, pfn_pte(pfn, PAGE_KERNEL)); flush_tlb_one(vaddr); return (void*) vaddr; @@ -111,7 +120,11 @@ struct page *__kmap_atomic_to_page(void *ptr) return pte_page(*pte); } -EXPORT_SYMBOL(__kmap); -EXPORT_SYMBOL(__kunmap); -EXPORT_SYMBOL(__kmap_atomic); -EXPORT_SYMBOL(__kunmap_atomic); +void __init kmap_init(void) +{ + unsigned long kmap_vstart; + + /* cache the first kmap pte */ + kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN); + kmap_pte = kmap_get_fixmap_pte(kmap_vstart); +} diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c index d9348946a19..c5511294a9e 100644 --- a/arch/mips/mm/init.c +++ b/arch/mips/mm/init.c @@ -104,14 +104,6 @@ unsigned long setup_zero_pages(void) return 1UL << order; } -/* - * These are almost like kmap_atomic / kunmap_atmic except they take an - * additional address argument as the hint. - */ - -#define kmap_get_fixmap_pte(vaddr) \ - pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), (vaddr)) - #ifdef CONFIG_MIPS_MT_SMTC static pte_t *kmap_coherent_pte; static void __init kmap_coherent_init(void) @@ -264,24 +256,6 @@ void copy_from_user_page(struct vm_area_struct *vma, } } -#ifdef CONFIG_HIGHMEM -unsigned long highstart_pfn, highend_pfn; - -pte_t *kmap_pte; -pgprot_t kmap_prot; - -static void __init kmap_init(void) -{ - unsigned long kmap_vstart; - - /* cache the first kmap pte */ - kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN); - kmap_pte = kmap_get_fixmap_pte(kmap_vstart); - - kmap_prot = PAGE_KERNEL; -} -#endif /* CONFIG_HIGHMEM */ - void __init fixrange_init(unsigned long start, unsigned long end, pgd_t *pgd_base) { diff --git a/arch/mips/mm/sc-rm7k.c b/arch/mips/mm/sc-rm7k.c index e3abfb2d7e8..de69bfbf506 100644 --- a/arch/mips/mm/sc-rm7k.c +++ b/arch/mips/mm/sc-rm7k.c @@ -29,7 +29,7 @@ extern unsigned long icache_way_size, dcache_way_size; #include -int rm7k_tcache_enabled; +static int rm7k_tcache_enabled; /* * Writeback and invalidate the primary cache dcache before DMA. 
@@ -121,7 +121,7 @@ static void rm7k_sc_disable(void) clear_c0_config(RM7K_CONF_SE); } -struct bcache_ops rm7k_sc_ops = { +static struct bcache_ops rm7k_sc_ops = { .bc_enable = rm7k_sc_enable, .bc_disable = rm7k_sc_disable, .bc_wback_inv = rm7k_sc_wback_inv, diff --git a/arch/mips/mm/tlb-r3k.c b/arch/mips/mm/tlb-r3k.c index f0cf46adb97..1c0048a6f5c 100644 --- a/arch/mips/mm/tlb-r3k.c +++ b/arch/mips/mm/tlb-r3k.c @@ -82,8 +82,7 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, int cpu = smp_processor_id(); if (cpu_context(cpu, mm) != 0) { - unsigned long flags; - int size; + unsigned long size, flags; #ifdef DEBUG_TLB printk("[tlbrange<%lu,0x%08lx,0x%08lx>]", @@ -121,8 +120,7 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, void local_flush_tlb_kernel_range(unsigned long start, unsigned long end) { - unsigned long flags; - int size; + unsigned long size, flags; #ifdef DEBUG_TLB printk("[tlbrange<%lu,0x%08lx,0x%08lx>]", start, end); diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c index 9619f66e531..892be426787 100644 --- a/arch/mips/mm/tlb-r4k.c +++ b/arch/mips/mm/tlb-r4k.c @@ -117,8 +117,7 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, int cpu = smp_processor_id(); if (cpu_context(cpu, mm) != 0) { - unsigned long flags; - int size; + unsigned long size, flags; ENTER_CRITICAL(flags); size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; @@ -160,8 +159,7 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, void local_flush_tlb_kernel_range(unsigned long start, unsigned long end) { - unsigned long flags; - int size; + unsigned long size, flags; ENTER_CRITICAL(flags); size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; diff --git a/arch/mips/mm/tlb-r8k.c b/arch/mips/mm/tlb-r8k.c index 4f01a3be215..4ec95cc2df2 100644 --- a/arch/mips/mm/tlb-r8k.c +++ b/arch/mips/mm/tlb-r8k.c @@ -111,8 +111,7 @@ out_restore: /* Usable for KV1 addresses only! 
*/ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end) { - unsigned long flags; - int size; + unsigned long size, flags; size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; size = (size + 1) >> 1; diff --git a/arch/mips/pmc-sierra/Kconfig b/arch/mips/pmc-sierra/Kconfig index 90261b83db0..c139988bb85 100644 --- a/arch/mips/pmc-sierra/Kconfig +++ b/arch/mips/pmc-sierra/Kconfig @@ -36,18 +36,6 @@ config PMC_MSP7120_FPGA endchoice -menu "Options for PMC-Sierra MSP chipsets" - depends on PMC_MSP - -config PMC_MSP_EMBEDDED_ROOTFS - bool "Root filesystem embedded in kernel image" - select MTD - select MTD_BLOCK - select MTD_PMC_MSP_RAMROOT - select MTD_RAM - -endmenu - config HYPERTRANSPORT bool "Hypertransport Support for PMC-Sierra Yosemite" depends on PMC_YOSEMITE diff --git a/arch/mips/pmc-sierra/msp71xx/msp_prom.c b/arch/mips/pmc-sierra/msp71xx/msp_prom.c index e5bd5481d8d..c317a3623ce 100644 --- a/arch/mips/pmc-sierra/msp71xx/msp_prom.c +++ b/arch/mips/pmc-sierra/msp71xx/msp_prom.c @@ -40,12 +40,6 @@ #include #include #include -#ifdef CONFIG_CRAMFS -#include -#endif -#ifdef CONFIG_SQUASHFS -#include -#endif #include #include @@ -435,10 +429,6 @@ struct prom_pmemblock *__init prom_getmdesc(void) char *str; unsigned int memsize; unsigned int heaptop; -#ifdef CONFIG_MTD_PMC_MSP_RAMROOT - void *ramroot_start; - unsigned long ramroot_size; -#endif int i; str = prom_getenv(memsz_env); @@ -506,19 +496,7 @@ struct prom_pmemblock *__init prom_getmdesc(void) i++; /* 3 */ mdesc[i].type = BOOT_MEM_RESERVED; mdesc[i].base = CPHYSADDR((u32)_text); -#ifdef CONFIG_MTD_PMC_MSP_RAMROOT - if (get_ramroot(&ramroot_start, &ramroot_size)) { - /* - * Rootfs in RAM -- follows kernel - * Combine rootfs image with kernel block so a - * page (4k) isn't wasted between memory blocks - */ - mdesc[i].size = CPHYSADDR(PAGE_ALIGN( - (u32)ramroot_start + ramroot_size)) - mdesc[i].base; - } else -#endif - mdesc[i].size = CPHYSADDR(PAGE_ALIGN( - (u32)_end)) - mdesc[i].base; + mdesc[i].size = CPHYSADDR(PAGE_ALIGN((u32)_end)) - mdesc[i].base; /* Remainder of RAM -- under memsize */ i++; /* 5 */ @@ -528,39 +506,3 @@ struct prom_pmemblock *__init prom_getmdesc(void) return &mdesc[0]; } - -/* rootfs functions */ -#ifdef CONFIG_MTD_PMC_MSP_RAMROOT -bool get_ramroot(void **start, unsigned long *size) -{ - extern char _end[]; - - /* Check for start following the end of the kernel */ - void *check_start = (void *)_end; - - /* Check for supported rootfs types */ -#ifdef CONFIG_CRAMFS - if (*(__u32 *)check_start == CRAMFS_MAGIC) { - /* Get CRAMFS size */ - *start = check_start; - *size = PAGE_ALIGN(((struct cramfs_super *) - check_start)->size); - - return true; - } -#endif -#ifdef CONFIG_SQUASHFS - if (*((unsigned int *)check_start) == SQUASHFS_MAGIC) { - /* Get SQUASHFS size */ - *start = check_start; - *size = PAGE_ALIGN(((struct squashfs_super_block *) - check_start)->bytes_used); - - return true; - } -#endif - - return false; -} -EXPORT_SYMBOL(get_ramroot); -#endif diff --git a/arch/mips/pmc-sierra/msp71xx/msp_setup.c b/arch/mips/pmc-sierra/msp71xx/msp_setup.c index c93675615f5..a54e85b3cf2 100644 --- a/arch/mips/pmc-sierra/msp71xx/msp_setup.c +++ b/arch/mips/pmc-sierra/msp71xx/msp_setup.c @@ -21,7 +21,6 @@ #if defined(CONFIG_PMC_MSP7120_GW) #include -#include #define MSP_BOARD_RESET_GPIO 9 #endif @@ -88,11 +87,8 @@ void msp7120_reset(void) * as GPIO char driver may not be enabled and it would look up * data inRAM! 
*/ - set_value_reg32(GPIO_CFG3_REG, - basic_mode_mask(MSP_BOARD_RESET_GPIO), - basic_mode(MSP_GPIO_OUTPUT, MSP_BOARD_RESET_GPIO)); - set_reg32(GPIO_DATA3_REG, - basic_data_mask(MSP_BOARD_RESET_GPIO)); + set_value_reg32(GPIO_CFG3_REG, 0xf000, 0x8000); + set_reg32(GPIO_DATA3_REG, 8); /* * In case GPIO9 doesn't reset the board (jumper configurable!) diff --git a/arch/mips/pmc-sierra/msp71xx/msp_time.c b/arch/mips/pmc-sierra/msp71xx/msp_time.c index 7cfeda5a651..cca64e15f57 100644 --- a/arch/mips/pmc-sierra/msp71xx/msp_time.c +++ b/arch/mips/pmc-sierra/msp71xx/msp_time.c @@ -81,10 +81,7 @@ void __init plat_time_init(void) mips_hpt_frequency = cpu_rate/2; } -void __init plat_timer_setup(struct irqaction *irq) +unsigned int __init get_c0_compare_int(void) { -#ifdef CONFIG_IRQ_MSP_CIC - /* we are using the vpe0 counter for timer interrupts */ - setup_irq(MSP_INT_VPE0_TIMER, irq); -#endif + return MSP_INT_VPE0_TIMER; } diff --git a/arch/mips/sgi-ip22/ip22-reset.c b/arch/mips/sgi-ip22/ip22-reset.c index 4ad5c3393fd..45b6694c207 100644 --- a/arch/mips/sgi-ip22/ip22-reset.c +++ b/arch/mips/sgi-ip22/ip22-reset.c @@ -148,7 +148,7 @@ static irqreturn_t panel_int(int irq, void *dev_id) if (sgint->istat1 & SGINT_ISTAT1_PWR) { /* Wait until interrupt goes away */ - disable_irq(SGI_PANEL_IRQ); + disable_irq_nosync(SGI_PANEL_IRQ); init_timer(&debounce_timer); debounce_timer.function = debounce; debounce_timer.expires = jiffies + 5; diff --git a/arch/mips/sgi-ip32/ip32-berr.c b/arch/mips/sgi-ip32/ip32-berr.c index a278e918a01..afc1cadbba3 100644 --- a/arch/mips/sgi-ip32/ip32-berr.c +++ b/arch/mips/sgi-ip32/ip32-berr.c @@ -16,7 +16,7 @@ #include #include -int ip32_be_handler(struct pt_regs *regs, int is_fixup) +static int ip32_be_handler(struct pt_regs *regs, int is_fixup) { int data = regs->cp0_cause & 4; diff --git a/arch/mips/sgi-ip32/ip32-irq.c b/arch/mips/sgi-ip32/ip32-irq.c index 83a0b3c359d..5c2bf111ca6 100644 --- a/arch/mips/sgi-ip32/ip32-irq.c +++ b/arch/mips/sgi-ip32/ip32-irq.c @@ -112,13 +112,13 @@ static void inline flush_mace_bus(void) extern irqreturn_t crime_memerr_intr(int irq, void *dev_id); extern irqreturn_t crime_cpuerr_intr(int irq, void *dev_id); -struct irqaction memerr_irq = { +static struct irqaction memerr_irq = { .handler = crime_memerr_intr, .flags = IRQF_DISABLED, .name = "CRIME memory error", }; -struct irqaction cpuerr_irq = { +static struct irqaction cpuerr_irq = { .handler = crime_cpuerr_intr, .flags = IRQF_DISABLED, .name = "CRIME CPU error", diff --git a/arch/mips/sgi-ip32/ip32-reset.c b/arch/mips/sgi-ip32/ip32-reset.c index b6cab089561..9b95d80ebc6 100644 --- a/arch/mips/sgi-ip32/ip32-reset.c +++ b/arch/mips/sgi-ip32/ip32-reset.c @@ -53,7 +53,7 @@ static inline void ip32_machine_halt(void) static void ip32_machine_power_off(void) { - volatile unsigned char reg_a, xctrl_a, xctrl_b; + unsigned char reg_a, xctrl_a, xctrl_b; disable_irq(MACEISA_RTC_IRQ); reg_a = CMOS_READ(RTC_REG_A); @@ -91,9 +91,10 @@ static void blink_timeout(unsigned long data) static void debounce(unsigned long data) { - volatile unsigned char reg_a, reg_c, xctrl_a; + unsigned char reg_a, reg_c, xctrl_a; reg_c = CMOS_READ(RTC_INTR_FLAGS); + reg_a = CMOS_READ(RTC_REG_A); CMOS_WRITE(reg_a | DS_REGA_DV0, RTC_REG_A); wbflush(); xctrl_a = CMOS_READ(DS_B1_XCTRL4A); @@ -137,7 +138,7 @@ static inline void ip32_power_button(void) static irqreturn_t ip32_rtc_int(int irq, void *dev_id) { - volatile unsigned char reg_c; + unsigned char reg_c; reg_c = CMOS_READ(RTC_INTR_FLAGS); if (!(reg_c & RTC_IRQF)) { @@ -145,7 +146,7 
@@ static irqreturn_t ip32_rtc_int(int irq, void *dev_id) "%s: RTC IRQ without RTC_IRQF\n", __func__); } /* Wait until interrupt goes away */ - disable_irq(MACEISA_RTC_IRQ); + disable_irq_nosync(MACEISA_RTC_IRQ); init_timer(&debounce_timer); debounce_timer.function = debounce; debounce_timer.expires = jiffies + 50; diff --git a/arch/mips/sibyte/bcm1480/irq.c b/arch/mips/sibyte/bcm1480/irq.c index 352352b3cb2..c147c4b35d3 100644 --- a/arch/mips/sibyte/bcm1480/irq.c +++ b/arch/mips/sibyte/bcm1480/irq.c @@ -113,7 +113,6 @@ static void bcm1480_set_affinity(unsigned int irq, const struct cpumask *mask) { int i = 0, old_cpu, cpu, int_on, k; u64 cur_ints; - struct irq_desc *desc = irq_desc + irq; unsigned long flags; unsigned int irq_dirty; @@ -127,8 +126,7 @@ static void bcm1480_set_affinity(unsigned int irq, const struct cpumask *mask) cpu = cpu_logical_map(i); /* Protect against other affinity changers and IMR manipulation */ - spin_lock_irqsave(&desc->lock, flags); - spin_lock(&bcm1480_imr_lock); + spin_lock_irqsave(&bcm1480_imr_lock, flags); /* Swizzle each CPU's IMR (but leave the IP selection alone) */ old_cpu = bcm1480_irq_owner[irq]; @@ -153,8 +151,7 @@ static void bcm1480_set_affinity(unsigned int irq, const struct cpumask *mask) ____raw_writeq(cur_ints, IOADDR(A_BCM1480_IMR_MAPPER(cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + (k*BCM1480_IMR_HL_SPACING))); } } - spin_unlock(&bcm1480_imr_lock); - spin_unlock_irqrestore(&desc->lock, flags); + spin_unlock_irqrestore(&bcm1480_imr_lock, flags); } #endif diff --git a/arch/mips/sibyte/cfe/setup.c b/arch/mips/sibyte/cfe/setup.c index 3de30f79db3..eb5396cf81b 100644 --- a/arch/mips/sibyte/cfe/setup.c +++ b/arch/mips/sibyte/cfe/setup.c @@ -288,13 +288,7 @@ void __init prom_init(void) */ cfe_cons_handle = cfe_getstdhandle(CFE_STDHANDLE_CONSOLE); if (cfe_getenv("LINUX_CMDLINE", arcs_cmdline, CL_SIZE) < 0) { - if (argc < 0) { - /* - * It's OK for direct boot to not provide a - * command line - */ - strcpy(arcs_cmdline, "root=/dev/ram0 "); - } else { + if (argc >= 0) { /* The loader should have set the command line */ /* too early for panic to do any good */ printk("LINUX_CMDLINE not defined in cfe."); diff --git a/arch/mips/sibyte/sb1250/irq.c b/arch/mips/sibyte/sb1250/irq.c index c08ff582da6..38cb998ade2 100644 --- a/arch/mips/sibyte/sb1250/irq.c +++ b/arch/mips/sibyte/sb1250/irq.c @@ -107,7 +107,6 @@ static void sb1250_set_affinity(unsigned int irq, const struct cpumask *mask) { int i = 0, old_cpu, cpu, int_on; u64 cur_ints; - struct irq_desc *desc = irq_desc + irq; unsigned long flags; i = cpumask_first(mask); @@ -121,8 +120,7 @@ static void sb1250_set_affinity(unsigned int irq, const struct cpumask *mask) cpu = cpu_logical_map(i); /* Protect against other affinity changers and IMR manipulation */ - spin_lock_irqsave(&desc->lock, flags); - spin_lock(&sb1250_imr_lock); + spin_lock_irqsave(&sb1250_imr_lock, flags); /* Swizzle each CPU's IMR (but leave the IP selection alone) */ old_cpu = sb1250_irq_owner[irq]; @@ -144,8 +142,7 @@ static void sb1250_set_affinity(unsigned int irq, const struct cpumask *mask) ____raw_writeq(cur_ints, IOADDR(A_IMR_MAPPER(cpu) + R_IMR_INTERRUPT_MASK)); } - spin_unlock(&sb1250_imr_lock); - spin_unlock_irqrestore(&desc->lock, flags); + spin_unlock_irqrestore(&sb1250_imr_lock, flags); } #endif diff --git a/arch/mips/txx9/generic/setup_tx4927.c b/arch/mips/txx9/generic/setup_tx4927.c index 914e93c6263..1093549df1a 100644 --- a/arch/mips/txx9/generic/setup_tx4927.c +++ b/arch/mips/txx9/generic/setup_tx4927.c @@ -88,7 +88,7 @@ void 
__init tx4927_setup(void) { int i; __u32 divmode; - int cpuclk = 0; + unsigned int cpuclk = 0; u64 ccfg; txx9_reg_res_init(TX4927_REV_PCODE(), TX4927_REG_BASE, diff --git a/arch/mips/txx9/generic/setup_tx4938.c b/arch/mips/txx9/generic/setup_tx4938.c index f0844f891f0..3925219b897 100644 --- a/arch/mips/txx9/generic/setup_tx4938.c +++ b/arch/mips/txx9/generic/setup_tx4938.c @@ -93,7 +93,7 @@ void __init tx4938_setup(void) { int i; __u32 divmode; - int cpuclk = 0; + unsigned int cpuclk = 0; u64 ccfg; txx9_reg_res_init(TX4938_REV_PCODE(), TX4938_REG_BASE, diff --git a/arch/mips/txx9/generic/setup_tx4939.c b/arch/mips/txx9/generic/setup_tx4939.c index 7a25b573e9b..c2bf150c883 100644 --- a/arch/mips/txx9/generic/setup_tx4939.c +++ b/arch/mips/txx9/generic/setup_tx4939.c @@ -114,7 +114,7 @@ void __init tx4939_setup(void) int i; __u32 divmode; __u64 pcfg; - int cpuclk = 0; + unsigned int cpuclk = 0; txx9_reg_res_init(TX4939_REV_PCODE(), TX4939_REG_BASE, TX4939_REG_SIZE); diff --git a/arch/mips/txx9/rbtx4939/setup.c b/arch/mips/txx9/rbtx4939/setup.c index 011e1e332f4..4199c6fd4d1 100644 --- a/arch/mips/txx9/rbtx4939/setup.c +++ b/arch/mips/txx9/rbtx4939/setup.c @@ -536,7 +536,7 @@ static void __init rbtx4939_setup(void) } struct txx9_board_vec rbtx4939_vec __initdata = { - .system = "Tothiba RBTX4939", + .system = "Toshiba RBTX4939", .prom_init = rbtx4939_prom_init, .mem_setup = rbtx4939_setup, .irq_setup = rbtx4939_irq_setup, diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index a0d1146a057..cdc9a6ff4be 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -868,6 +868,18 @@ config TASK_SIZE default "0x80000000" if PPC_PREP || PPC_8xx default "0xc0000000" +config CONSISTENT_SIZE_BOOL + bool "Set custom consistent memory pool size" + depends on ADVANCED_OPTIONS && NOT_COHERENT_CACHE + help + This option allows you to set the size of the + consistent memory pool. This pool of virtual memory + is used to make consistent memory allocations. + +config CONSISTENT_SIZE + hex "Size of consistent memory pool" if CONSISTENT_SIZE_BOOL + default "0x00200000" if NOT_COHERENT_CACHE + config PIN_TLB bool "Pinned Kernel TLBs (860 ONLY)" depends on ADVANCED_OPTIONS && 8xx diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile index 8da2bf963b5..9ae7b7e2ba7 100644 --- a/arch/powerpc/boot/Makefile +++ b/arch/powerpc/boot/Makefile @@ -346,7 +346,7 @@ install: $(CONFIGURE) $(addprefix $(obj)/, $(image-y)) clean-files += $(image-) $(initrd-) cuImage.* dtbImage.* treeImage.* \ zImage zImage.initrd zImage.chrp zImage.coff zImage.holly \ zImage.iseries zImage.miboot zImage.pmac zImage.pseries \ - otheros.bld *.dtb + simpleImage.* otheros.bld *.dtb # clean up files cached by wrapper clean-kernel := vmlinux.strip vmlinux.bin diff --git a/arch/powerpc/boot/mktree.c b/arch/powerpc/boot/mktree.c index 45d06a8c7cd..c2baae0a3d8 100644 --- a/arch/powerpc/boot/mktree.c +++ b/arch/powerpc/boot/mktree.c @@ -42,7 +42,7 @@ int main(int argc, char *argv[]) { int in_fd, out_fd; int nblks, i; - uint cksum, *cp; + unsigned int cksum, *cp; struct stat st; boot_block_t bt; @@ -90,7 +90,7 @@ int main(int argc, char *argv[]) cksum = 0; cp = (void *)&bt; - for (i=0; i #include @@ -24,6 +22,8 @@ extern unsigned long FIXADDR_TOP; #include #endif +#define FIXADDR_TOP ((unsigned long)(-PAGE_SIZE)) + /* * Here we define all the compile-time 'special' virtual * addresses. 
The point is to have a constant address at diff --git a/arch/powerpc/include/asm/iseries/iommu.h b/arch/powerpc/include/asm/iseries/iommu.h index c59ee7e4bed..1b9692c6089 100644 --- a/arch/powerpc/include/asm/iseries/iommu.h +++ b/arch/powerpc/include/asm/iseries/iommu.h @@ -26,10 +26,6 @@ struct vio_dev; struct device_node; struct iommu_table; -/* Creates table for an individual device node */ -extern void iommu_devnode_init_iSeries(struct pci_dev *pdev, - struct device_node *dn); - /* Get table parameters from HV */ extern void iommu_table_getparms_iSeries(unsigned long busno, unsigned char slotno, unsigned char virtbus, diff --git a/arch/powerpc/include/asm/pgtable-ppc32.h b/arch/powerpc/include/asm/pgtable-ppc32.h index ba45c997830..c9ff9d75990 100644 --- a/arch/powerpc/include/asm/pgtable-ppc32.h +++ b/arch/powerpc/include/asm/pgtable-ppc32.h @@ -10,7 +10,7 @@ extern unsigned long va_to_phys(unsigned long address); extern pte_t *va_to_pte(unsigned long address); -extern unsigned long ioremap_bot, ioremap_base; +extern unsigned long ioremap_bot; #ifdef CONFIG_44x extern int icache_44x_need_flush; @@ -55,9 +55,31 @@ extern int icache_44x_need_flush; #define pgd_ERROR(e) \ printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e)) +/* + * This is the bottom of the PKMAP area with HIGHMEM or an arbitrary + * value (for now) on others, from where we can start layout kernel + * virtual space that goes below PKMAP and FIXMAP + */ +#ifdef CONFIG_HIGHMEM +#define KVIRT_TOP PKMAP_BASE +#else +#define KVIRT_TOP (0xfe000000UL) /* for now, could be FIXMAP_BASE ? */ +#endif + +/* + * ioremap_bot starts at that address. Early ioremaps move down from there, + * until mem_init() at which point this becomes the top of the vmalloc + * and ioremap space + */ +#ifdef CONFIG_NOT_COHERENT_CACHE +#define IOREMAP_TOP ((KVIRT_TOP - CONFIG_CONSISTENT_SIZE) & PAGE_MASK) +#else +#define IOREMAP_TOP KVIRT_TOP +#endif + /* * Just any arbitrary offset to the start of the vmalloc VM area: the - * current 64MB value just means that there will be a 64MB "hole" after the + * current 16MB value just means that there will be a 64MB "hole" after the * physical memory until the kernel virtual memory starts. That means that * any out-of-bounds memory accesses will hopefully be caught. * The vmalloc() routines leaves a hole of 4kB between each vmalloced diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h index f612798e1c9..2b2420a4988 100644 --- a/arch/powerpc/include/asm/system.h +++ b/arch/powerpc/include/asm/system.h @@ -212,7 +212,7 @@ extern struct task_struct *_switch(struct thread_struct *prev, extern unsigned int rtas_data; extern int mem_init_done; /* set on boot once kmalloc can be called */ extern int init_bootmem_done; /* set on !NUMA once bootmem is available */ -extern unsigned long memory_limit; +extern phys_addr_t memory_limit; extern unsigned long klimit; extern void *alloc_maybe_bootmem(size_t size, gfp_t mask); diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index f9c40f869c6..3e33fb933d9 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c @@ -1836,7 +1836,7 @@ static void __init setup_cpu_spec(unsigned long offset, struct cpu_spec *s) * and, in that case, keep the current value for * oprofile_cpu_type. 
*/ - if (old.oprofile_cpu_type == NULL) { + if (old.oprofile_cpu_type != NULL) { t->oprofile_cpu_type = old.oprofile_cpu_type; t->oprofile_type = old.oprofile_type; } diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c index 53c7788cba7..6b02793dc75 100644 --- a/arch/powerpc/kernel/dma.c +++ b/arch/powerpc/kernel/dma.c @@ -32,7 +32,7 @@ void *dma_direct_alloc_coherent(struct device *dev, size_t size, { void *ret; #ifdef CONFIG_NOT_COHERENT_CACHE - ret = __dma_alloc_coherent(size, dma_handle, flag); + ret = __dma_alloc_coherent(dev, size, dma_handle, flag); if (ret == NULL) return NULL; *dma_handle += get_dma_direct_offset(dev); diff --git a/arch/powerpc/kernel/ftrace.c b/arch/powerpc/kernel/ftrace.c index 70e2a736be1..2d182f119d1 100644 --- a/arch/powerpc/kernel/ftrace.c +++ b/arch/powerpc/kernel/ftrace.c @@ -157,7 +157,7 @@ __ftrace_make_nop(struct module *mod, * 0xe8, 0x4c, 0x00, 0x28, ld r2,40(r12) */ - pr_debug("ip:%lx jumps to %lx r2: %lx", ip, tramp, mod->arch.toc); + pr_devel("ip:%lx jumps to %lx r2: %lx", ip, tramp, mod->arch.toc); /* Find where the trampoline jumps to */ if (probe_kernel_read(jmp, (void *)tramp, sizeof(jmp))) { @@ -165,7 +165,7 @@ __ftrace_make_nop(struct module *mod, return -EFAULT; } - pr_debug(" %08x %08x", jmp[0], jmp[1]); + pr_devel(" %08x %08x", jmp[0], jmp[1]); /* verify that this is what we expect it to be */ if (((jmp[0] & 0xffff0000) != 0x3d820000) || @@ -181,18 +181,18 @@ __ftrace_make_nop(struct module *mod, offset = ((unsigned)((unsigned short)jmp[0]) << 16) + (int)((short)jmp[1]); - pr_debug(" %x ", offset); + pr_devel(" %x ", offset); /* get the address this jumps too */ tramp = mod->arch.toc + offset + 32; - pr_debug("toc: %lx", tramp); + pr_devel("toc: %lx", tramp); if (probe_kernel_read(jmp, (void *)tramp, 8)) { printk(KERN_ERR "Failed to read %lx\n", tramp); return -EFAULT; } - pr_debug(" %08x %08x\n", jmp[0], jmp[1]); + pr_devel(" %08x %08x\n", jmp[0], jmp[1]); ptr = ((unsigned long)jmp[0] << 32) + jmp[1]; @@ -269,7 +269,7 @@ __ftrace_make_nop(struct module *mod, * 0x4e, 0x80, 0x04, 0x20 bctr */ - pr_debug("ip:%lx jumps to %lx", ip, tramp); + pr_devel("ip:%lx jumps to %lx", ip, tramp); /* Find where the trampoline jumps to */ if (probe_kernel_read(jmp, (void *)tramp, sizeof(jmp))) { @@ -277,7 +277,7 @@ __ftrace_make_nop(struct module *mod, return -EFAULT; } - pr_debug(" %08x %08x ", jmp[0], jmp[1]); + pr_devel(" %08x %08x ", jmp[0], jmp[1]); /* verify that this is what we expect it to be */ if (((jmp[0] & 0xffff0000) != 0x3d600000) || @@ -293,7 +293,7 @@ __ftrace_make_nop(struct module *mod, if (tramp & 0x8000) tramp -= 0x10000; - pr_debug(" %lx ", tramp); + pr_devel(" %lx ", tramp); if (tramp != addr) { printk(KERN_ERR @@ -402,7 +402,7 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) /* ld r2,40(r1) */ op[1] = 0xe8410028; - pr_debug("write to %lx\n", rec->ip); + pr_devel("write to %lx\n", rec->ip); if (probe_kernel_write((void *)ip, op, MCOUNT_INSN_SIZE * 2)) return -EPERM; @@ -442,7 +442,7 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) return -EINVAL; } - pr_debug("write to %lx\n", rec->ip); + pr_devel("write to %lx\n", rec->ip); if (probe_kernel_write((void *)ip, &op, MCOUNT_INSN_SIZE)) return -EPERM; @@ -594,7 +594,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) PPC_LONG "2b,4b\n" ".previous" - : [old] "=r" (old), [faulted] "=r" (faulted) + : [old] "=&r" (old), [faulted] "=r" (faulted) : [parent] "r" (parent), [return_hooker] "r" (return_hooker) : "memory" ); 
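The last ftrace hunk above changes the "old" output operand from "=r" to "=&r", marking it earlyclobber because the asm writes it before all of its inputs have been consumed. A minimal sketch of the general idea follows; it is illustrative only and not taken from this patch, and the function and operand names are invented:

/*
 * Hypothetical example: without the '&' earlyclobber, the compiler may
 * allocate 'out' and 'b' to the same register, so the first instruction
 * would clobber 'b' before the add reads it.
 */
static inline unsigned long copy_then_add(unsigned long a, unsigned long b)
{
	unsigned long out;

	asm("mr	%[out], %[a]\n\t"		/* output written early */
	    "add	%[out], %[out], %[b]"	/* input 'b' read afterwards */
	    : [out] "=&r" (out)			/* earlyclobber keeps it distinct */
	    : [a] "r" (a), [b] "r" (b));

	return out;
}

In prepare_ftrace_return() the old operand is likewise written before the parent and return_hooker inputs are finished with, which is presumably why the hunk switches it to "=&r".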
diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c index d59e2b1bdcb..bb3d893a835 100644 --- a/arch/powerpc/kernel/machine_kexec.c +++ b/arch/powerpc/kernel/machine_kexec.c @@ -125,8 +125,8 @@ void __init reserve_crashkernel(void) /* Crash kernel trumps memory limit */ if (memory_limit && memory_limit <= crashk_res.end) { memory_limit = crashk_res.end + 1; - printk("Adjusted memory limit for crashkernel, now 0x%lx\n", - memory_limit); + printk("Adjusted memory limit for crashkernel, now 0x%llx\n", + (unsigned long long)memory_limit); } printk(KERN_INFO "Reserving %ldMB of memory at %ldMB " diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c index 9c69e7e145c..4fee63cb53f 100644 --- a/arch/powerpc/kernel/pci-common.c +++ b/arch/powerpc/kernel/pci-common.c @@ -1366,12 +1366,17 @@ static void __init pcibios_allocate_resources(int pass) for_each_pci_dev(dev) { pci_read_config_word(dev, PCI_COMMAND, &command); - for (idx = 0; idx < 6; idx++) { + for (idx = 0; idx <= PCI_ROM_RESOURCE; idx++) { r = &dev->resource[idx]; if (r->parent) /* Already allocated */ continue; if (!r->flags || (r->flags & IORESOURCE_UNSET)) continue; /* Not assigned at all */ + /* We only allocate ROMs on pass 1 just in case they + * have been screwed up by firmware + */ + if (idx == PCI_ROM_RESOURCE ) + disabled = 1; if (r->flags & IORESOURCE_IO) disabled = !(command & PCI_COMMAND_IO); else @@ -1382,17 +1387,19 @@ static void __init pcibios_allocate_resources(int pass) if (pass) continue; r = &dev->resource[PCI_ROM_RESOURCE]; - if (r->flags & IORESOURCE_ROM_ENABLE) { + if (r->flags) { /* Turn the ROM off, leave the resource region, * but keep it unregistered. */ u32 reg; - pr_debug("PCI: Switching off ROM of %s\n", - pci_name(dev)); - r->flags &= ~IORESOURCE_ROM_ENABLE; pci_read_config_dword(dev, dev->rom_base_reg, ®); - pci_write_config_dword(dev, dev->rom_base_reg, - reg & ~PCI_ROM_ADDRESS_ENABLE); + if (reg & PCI_ROM_ADDRESS_ENABLE) { + pr_debug("PCI: Switching off ROM of %s\n", + pci_name(dev)); + r->flags &= ~IORESOURCE_ROM_ENABLE; + pci_write_config_dword(dev, dev->rom_base_reg, + reg & ~PCI_ROM_ADDRESS_ENABLE); + } } } } diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c index be574fc0d92..96edb6f8bab 100644 --- a/arch/powerpc/kernel/pci_64.c +++ b/arch/powerpc/kernel/pci_64.c @@ -64,7 +64,7 @@ static u32 get_int_prop(struct device_node *np, const char *name, u32 def) return def; } -static unsigned int pci_parse_of_flags(u32 addr0) +static unsigned int pci_parse_of_flags(u32 addr0, int bridge) { unsigned int flags = 0; @@ -75,8 +75,17 @@ static unsigned int pci_parse_of_flags(u32 addr0) if (addr0 & 0x40000000) flags |= IORESOURCE_PREFETCH | PCI_BASE_ADDRESS_MEM_PREFETCH; + /* Note: We don't know whether the ROM has been left enabled + * by the firmware or not. 
We mark it as disabled (ie, we do + * not set the IORESOURCE_ROM_ENABLE flag) for now rather than + * do a config space read, it will be force-enabled if needed + */ + if (!bridge && (addr0 & 0xff) == 0x30) + flags |= IORESOURCE_READONLY; } else if (addr0 & 0x01000000) flags = IORESOURCE_IO | PCI_BASE_ADDRESS_SPACE_IO; + if (flags) + flags |= IORESOURCE_SIZEALIGN; return flags; } @@ -95,7 +104,7 @@ static void pci_parse_of_addrs(struct device_node *node, struct pci_dev *dev) return; pr_debug(" parse addresses (%d bytes) @ %p\n", proplen, addrs); for (; proplen >= 20; proplen -= 20, addrs += 5) { - flags = pci_parse_of_flags(addrs[0]); + flags = pci_parse_of_flags(addrs[0], 0); if (!flags) continue; base = of_read_number(&addrs[1], 2); @@ -293,7 +302,7 @@ void __devinit of_scan_pci_bridge(struct device_node *node, } i = 1; for (; len >= 32; len -= 32, ranges += 8) { - flags = pci_parse_of_flags(ranges[0]); + flags = pci_parse_of_flags(ranges[0], 1); size = of_read_number(&ranges[6], 2); if (flags == 0 || size == 0) continue; diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index 5ec6a9e2393..ce01ff2474d 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -426,7 +426,7 @@ static int __init early_parse_mem(char *p) return 1; memory_limit = PAGE_ALIGN(memparse(p, &p)); - DBG("memory limit = 0x%lx\n", memory_limit); + DBG("memory limit = 0x%llx\n", (unsigned long long)memory_limit); return 0; } @@ -1160,7 +1160,7 @@ static inline void __init phyp_dump_reserve_mem(void) {} void __init early_init_devtree(void *params) { - unsigned long limit; + phys_addr_t limit; DBG(" -> early_init_devtree(%p)\n", params); @@ -1204,7 +1204,7 @@ void __init early_init_devtree(void *params) limit = memory_limit; if (! limit) { - unsigned long memsize; + phys_addr_t memsize; /* Ensure that total memory size is page-aligned, because * otherwise mark_bootmem() gets upset. */ @@ -1218,7 +1218,7 @@ void __init early_init_devtree(void *params) lmb_analyze(); lmb_dump_all(); - DBG("Phys. mem: %lx\n", lmb_phys_mem_size()); + DBG("Phys. mem: %llx\n", lmb_phys_mem_size()); /* We may need to relocate the flat tree, do it now. * FIXME .. and the initrd too? */ diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S index a047a6cfca4..8ef8a14abc9 100644 --- a/arch/powerpc/kernel/vmlinux.lds.S +++ b/arch/powerpc/kernel/vmlinux.lds.S @@ -264,6 +264,7 @@ SECTIONS *(.data.page_aligned) } + . = ALIGN(L1_CACHE_BYTES); .data.cacheline_aligned : AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET) { *(.data.cacheline_aligned) } diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile index 8db35278a4b..29b742b90f1 100644 --- a/arch/powerpc/lib/Makefile +++ b/arch/powerpc/lib/Makefile @@ -18,7 +18,6 @@ obj-$(CONFIG_PPC64) += copypage_64.o copyuser_64.o \ memcpy_64.o usercopy_64.o mem_64.o string.o obj-$(CONFIG_XMON) += sstep.o obj-$(CONFIG_KPROBES) += sstep.o -obj-$(CONFIG_NOT_COHERENT_CACHE) += dma-noncoherent.o ifeq ($(CONFIG_PPC64),y) obj-$(CONFIG_SMP) += locks.o diff --git a/arch/powerpc/lib/dma-noncoherent.c b/arch/powerpc/lib/dma-noncoherent.c deleted file mode 100644 index 005a28d380a..00000000000 --- a/arch/powerpc/lib/dma-noncoherent.c +++ /dev/null @@ -1,237 +0,0 @@ -/* - * PowerPC version derived from arch/arm/mm/consistent.c - * Copyright (C) 2001 Dan Malek (dmalek@jlc.net) - * - * Copyright (C) 2000 Russell King - * - * Consistent memory allocators. Used for DMA devices that want to - * share uncached memory with the processor core. 
The function return - * is the virtual address and 'dma_handle' is the physical address. - * Mostly stolen from the ARM port, with some changes for PowerPC. - * -- Dan - * - * Reorganized to get rid of the arch-specific consistent_* functions - * and provide non-coherent implementations for the DMA API. -Matt - * - * Added in_interrupt() safe dma_alloc_coherent()/dma_free_coherent() - * implementation. This is pulled straight from ARM and barely - * modified. -Matt - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -/* - * Allocate DMA-coherent memory space and return both the kernel remapped - * virtual and bus address for that space. - */ -void * -__dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp) -{ - struct page *page; - unsigned long order; - int i; - unsigned int nr_pages = PAGE_ALIGN(size)>>PAGE_SHIFT; - unsigned int array_size = nr_pages * sizeof(struct page *); - struct page **pages; - struct page *end; - u64 mask = 0x00ffffff, limit; /* ISA default */ - struct vm_struct *area; - - BUG_ON(!mem_init_done); - size = PAGE_ALIGN(size); - limit = (mask + 1) & ~mask; - if (limit && size >= limit) { - printk(KERN_WARNING "coherent allocation too big (requested " - "%#x mask %#Lx)\n", size, mask); - return NULL; - } - - order = get_order(size); - - if (mask != 0xffffffff) - gfp |= GFP_DMA; - - page = alloc_pages(gfp, order); - if (!page) - goto no_page; - - end = page + (1 << order); - - /* - * Invalidate any data that might be lurking in the - * kernel direct-mapped region for device DMA. - */ - { - unsigned long kaddr = (unsigned long)page_address(page); - memset(page_address(page), 0, size); - flush_dcache_range(kaddr, kaddr + size); - } - - split_page(page, order); - - /* - * Set the "dma handle" - */ - *handle = page_to_phys(page); - - area = get_vm_area_caller(size, VM_IOREMAP, - __builtin_return_address(1)); - if (!area) - goto out_free_pages; - - if (array_size > PAGE_SIZE) { - pages = vmalloc(array_size); - area->flags |= VM_VPAGES; - } else { - pages = kmalloc(array_size, GFP_KERNEL); - } - if (!pages) - goto out_free_area; - - area->pages = pages; - area->nr_pages = nr_pages; - - for (i = 0; i < nr_pages; i++) - pages[i] = page + i; - - if (map_vm_area(area, pgprot_noncached(PAGE_KERNEL), &pages)) - goto out_unmap; - - /* - * Free the otherwise unused pages. - */ - page += nr_pages; - while (page < end) { - __free_page(page); - page++; - } - - return area->addr; -out_unmap: - vunmap(area->addr); - if (array_size > PAGE_SIZE) - vfree(pages); - else - kfree(pages); - goto out_free_pages; -out_free_area: - free_vm_area(area); -out_free_pages: - if (page) - __free_pages(page, order); -no_page: - return NULL; -} -EXPORT_SYMBOL(__dma_alloc_coherent); - -/* - * free a page as defined by the above mapping. - */ -void __dma_free_coherent(size_t size, void *vaddr) -{ - vfree(vaddr); - -} -EXPORT_SYMBOL(__dma_free_coherent); - -/* - * make an area consistent. 
- */ -void __dma_sync(void *vaddr, size_t size, int direction) -{ - unsigned long start = (unsigned long)vaddr; - unsigned long end = start + size; - - switch (direction) { - case DMA_NONE: - BUG(); - case DMA_FROM_DEVICE: - /* - * invalidate only when cache-line aligned otherwise there is - * the potential for discarding uncommitted data from the cache - */ - if ((start & (L1_CACHE_BYTES - 1)) || (size & (L1_CACHE_BYTES - 1))) - flush_dcache_range(start, end); - else - invalidate_dcache_range(start, end); - break; - case DMA_TO_DEVICE: /* writeback only */ - clean_dcache_range(start, end); - break; - case DMA_BIDIRECTIONAL: /* writeback and invalidate */ - flush_dcache_range(start, end); - break; - } -} -EXPORT_SYMBOL(__dma_sync); - -#ifdef CONFIG_HIGHMEM -/* - * __dma_sync_page() implementation for systems using highmem. - * In this case, each page of a buffer must be kmapped/kunmapped - * in order to have a virtual address for __dma_sync(). This must - * not sleep so kmap_atomic()/kunmap_atomic() are used. - * - * Note: yes, it is possible and correct to have a buffer extend - * beyond the first page. - */ -static inline void __dma_sync_page_highmem(struct page *page, - unsigned long offset, size_t size, int direction) -{ - size_t seg_size = min((size_t)(PAGE_SIZE - offset), size); - size_t cur_size = seg_size; - unsigned long flags, start, seg_offset = offset; - int nr_segs = 1 + ((size - seg_size) + PAGE_SIZE - 1)/PAGE_SIZE; - int seg_nr = 0; - - local_irq_save(flags); - - do { - start = (unsigned long)kmap_atomic(page + seg_nr, - KM_PPC_SYNC_PAGE) + seg_offset; - - /* Sync this buffer segment */ - __dma_sync((void *)start, seg_size, direction); - kunmap_atomic((void *)start, KM_PPC_SYNC_PAGE); - seg_nr++; - - /* Calculate next buffer segment size */ - seg_size = min((size_t)PAGE_SIZE, size - cur_size); - - /* Add the segment size to our running total */ - cur_size += seg_size; - seg_offset = 0; - } while (seg_nr < nr_segs); - - local_irq_restore(flags); -} -#endif /* CONFIG_HIGHMEM */ - -/* - * __dma_sync_page makes memory consistent. identical to __dma_sync, but - * takes a struct page instead of a virtual address - */ -void __dma_sync_page(struct page *page, unsigned long offset, - size_t size, int direction) -{ -#ifdef CONFIG_HIGHMEM - __dma_sync_page_highmem(page, offset, size, direction); -#else - unsigned long start = (unsigned long)page_address(page) + offset; - __dma_sync((void *)start, size, direction); -#endif -} -EXPORT_SYMBOL(__dma_sync_page); diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile index 17290bcedc5..b746f4ca420 100644 --- a/arch/powerpc/mm/Makefile +++ b/arch/powerpc/mm/Makefile @@ -26,3 +26,4 @@ obj-$(CONFIG_NEED_MULTIPLE_NODES) += numa.o obj-$(CONFIG_PPC_MM_SLICES) += slice.o obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o obj-$(CONFIG_PPC_SUBPAGE_PROT) += subpage-prot.o +obj-$(CONFIG_NOT_COHERENT_CACHE) += dma-noncoherent.o diff --git a/arch/powerpc/mm/dma-noncoherent.c b/arch/powerpc/mm/dma-noncoherent.c new file mode 100644 index 00000000000..36692f5c9a7 --- /dev/null +++ b/arch/powerpc/mm/dma-noncoherent.c @@ -0,0 +1,400 @@ +/* + * PowerPC version derived from arch/arm/mm/consistent.c + * Copyright (C) 2001 Dan Malek (dmalek@jlc.net) + * + * Copyright (C) 2000 Russell King + * + * Consistent memory allocators. Used for DMA devices that want to + * share uncached memory with the processor core. The function return + * is the virtual address and 'dma_handle' is the physical address. 
+ * Mostly stolen from the ARM port, with some changes for PowerPC. + * -- Dan + * + * Reorganized to get rid of the arch-specific consistent_* functions + * and provide non-coherent implementations for the DMA API. -Matt + * + * Added in_interrupt() safe dma_alloc_coherent()/dma_free_coherent() + * implementation. This is pulled straight from ARM and barely + * modified. -Matt + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "mmu_decl.h" + +/* + * This address range defaults to a value that is safe for all + * platforms which currently set CONFIG_NOT_COHERENT_CACHE. It + * can be further configured for specific applications under + * the "Advanced Setup" menu. -Matt + */ +#define CONSISTENT_BASE (IOREMAP_TOP) +#define CONSISTENT_END (CONSISTENT_BASE + CONFIG_CONSISTENT_SIZE) +#define CONSISTENT_OFFSET(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT) + +/* + * This is the page table (2MB) covering uncached, DMA consistent allocations + */ +static DEFINE_SPINLOCK(consistent_lock); + +/* + * VM region handling support. + * + * This should become something generic, handling VM region allocations for + * vmalloc and similar (ioremap, module space, etc). + * + * I envisage vmalloc()'s supporting vm_struct becoming: + * + * struct vm_struct { + * struct vm_region region; + * unsigned long flags; + * struct page **pages; + * unsigned int nr_pages; + * unsigned long phys_addr; + * }; + * + * get_vm_area() would then call vm_region_alloc with an appropriate + * struct vm_region head (eg): + * + * struct vm_region vmalloc_head = { + * .vm_list = LIST_HEAD_INIT(vmalloc_head.vm_list), + * .vm_start = VMALLOC_START, + * .vm_end = VMALLOC_END, + * }; + * + * However, vmalloc_head.vm_start is variable (typically, it is dependent on + * the amount of RAM found at boot time.) I would imagine that get_vm_area() + * would have to initialise this each time prior to calling vm_region_alloc(). + */ +struct ppc_vm_region { + struct list_head vm_list; + unsigned long vm_start; + unsigned long vm_end; +}; + +static struct ppc_vm_region consistent_head = { + .vm_list = LIST_HEAD_INIT(consistent_head.vm_list), + .vm_start = CONSISTENT_BASE, + .vm_end = CONSISTENT_END, +}; + +static struct ppc_vm_region * +ppc_vm_region_alloc(struct ppc_vm_region *head, size_t size, gfp_t gfp) +{ + unsigned long addr = head->vm_start, end = head->vm_end - size; + unsigned long flags; + struct ppc_vm_region *c, *new; + + new = kmalloc(sizeof(struct ppc_vm_region), gfp); + if (!new) + goto out; + + spin_lock_irqsave(&consistent_lock, flags); + + list_for_each_entry(c, &head->vm_list, vm_list) { + if ((addr + size) < addr) + goto nospc; + if ((addr + size) <= c->vm_start) + goto found; + addr = c->vm_end; + if (addr > end) + goto nospc; + } + + found: + /* + * Insert this entry _before_ the one we found. 
+ */ + list_add_tail(&new->vm_list, &c->vm_list); + new->vm_start = addr; + new->vm_end = addr + size; + + spin_unlock_irqrestore(&consistent_lock, flags); + return new; + + nospc: + spin_unlock_irqrestore(&consistent_lock, flags); + kfree(new); + out: + return NULL; +} + +static struct ppc_vm_region *ppc_vm_region_find(struct ppc_vm_region *head, unsigned long addr) +{ + struct ppc_vm_region *c; + + list_for_each_entry(c, &head->vm_list, vm_list) { + if (c->vm_start == addr) + goto out; + } + c = NULL; + out: + return c; +} + +/* + * Allocate DMA-coherent memory space and return both the kernel remapped + * virtual and bus address for that space. + */ +void * +__dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp) +{ + struct page *page; + struct ppc_vm_region *c; + unsigned long order; + u64 mask = ISA_DMA_THRESHOLD, limit; + + if (dev) { + mask = dev->coherent_dma_mask; + + /* + * Sanity check the DMA mask - it must be non-zero, and + * must be able to be satisfied by a DMA allocation. + */ + if (mask == 0) { + dev_warn(dev, "coherent DMA mask is unset\n"); + goto no_page; + } + + if ((~mask) & ISA_DMA_THRESHOLD) { + dev_warn(dev, "coherent DMA mask %#llx is smaller " + "than system GFP_DMA mask %#llx\n", + mask, (unsigned long long)ISA_DMA_THRESHOLD); + goto no_page; + } + } + + + size = PAGE_ALIGN(size); + limit = (mask + 1) & ~mask; + if ((limit && size >= limit) || + size >= (CONSISTENT_END - CONSISTENT_BASE)) { + printk(KERN_WARNING "coherent allocation too big (requested %#x mask %#Lx)\n", + size, mask); + return NULL; + } + + order = get_order(size); + + /* Might be useful if we ever have a real legacy DMA zone... */ + if (mask != 0xffffffff) + gfp |= GFP_DMA; + + page = alloc_pages(gfp, order); + if (!page) + goto no_page; + + /* + * Invalidate any data that might be lurking in the + * kernel direct-mapped region for device DMA. + */ + { + unsigned long kaddr = (unsigned long)page_address(page); + memset(page_address(page), 0, size); + flush_dcache_range(kaddr, kaddr + size); + } + + /* + * Allocate a virtual address in the consistent mapping region. + */ + c = ppc_vm_region_alloc(&consistent_head, size, + gfp & ~(__GFP_DMA | __GFP_HIGHMEM)); + if (c) { + unsigned long vaddr = c->vm_start; + struct page *end = page + (1 << order); + + split_page(page, order); + + /* + * Set the "dma handle" + */ + *handle = page_to_phys(page); + + do { + SetPageReserved(page); + map_page(vaddr, page_to_phys(page), + pgprot_noncached(PAGE_KERNEL)); + page++; + vaddr += PAGE_SIZE; + } while (size -= PAGE_SIZE); + + /* + * Free the otherwise unused pages. + */ + while (page < end) { + __free_page(page); + page++; + } + + return (void *)c->vm_start; + } + + if (page) + __free_pages(page, order); + no_page: + return NULL; +} +EXPORT_SYMBOL(__dma_alloc_coherent); + +/* + * free a page as defined by the above mapping. 
+ */ +void __dma_free_coherent(size_t size, void *vaddr) +{ + struct ppc_vm_region *c; + unsigned long flags, addr; + + size = PAGE_ALIGN(size); + + spin_lock_irqsave(&consistent_lock, flags); + + c = ppc_vm_region_find(&consistent_head, (unsigned long)vaddr); + if (!c) + goto no_area; + + if ((c->vm_end - c->vm_start) != size) { + printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n", + __func__, c->vm_end - c->vm_start, size); + dump_stack(); + size = c->vm_end - c->vm_start; + } + + addr = c->vm_start; + do { + pte_t *ptep; + unsigned long pfn; + + ptep = pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(addr), + addr), + addr), + addr); + if (!pte_none(*ptep) && pte_present(*ptep)) { + pfn = pte_pfn(*ptep); + pte_clear(&init_mm, addr, ptep); + if (pfn_valid(pfn)) { + struct page *page = pfn_to_page(pfn); + + ClearPageReserved(page); + __free_page(page); + } + } + addr += PAGE_SIZE; + } while (size -= PAGE_SIZE); + + flush_tlb_kernel_range(c->vm_start, c->vm_end); + + list_del(&c->vm_list); + + spin_unlock_irqrestore(&consistent_lock, flags); + + kfree(c); + return; + + no_area: + spin_unlock_irqrestore(&consistent_lock, flags); + printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n", + __func__, vaddr); + dump_stack(); +} +EXPORT_SYMBOL(__dma_free_coherent); + +/* + * make an area consistent. + */ +void __dma_sync(void *vaddr, size_t size, int direction) +{ + unsigned long start = (unsigned long)vaddr; + unsigned long end = start + size; + + switch (direction) { + case DMA_NONE: + BUG(); + case DMA_FROM_DEVICE: + /* + * invalidate only when cache-line aligned otherwise there is + * the potential for discarding uncommitted data from the cache + */ + if ((start & (L1_CACHE_BYTES - 1)) || (size & (L1_CACHE_BYTES - 1))) + flush_dcache_range(start, end); + else + invalidate_dcache_range(start, end); + break; + case DMA_TO_DEVICE: /* writeback only */ + clean_dcache_range(start, end); + break; + case DMA_BIDIRECTIONAL: /* writeback and invalidate */ + flush_dcache_range(start, end); + break; + } +} +EXPORT_SYMBOL(__dma_sync); + +#ifdef CONFIG_HIGHMEM +/* + * __dma_sync_page() implementation for systems using highmem. + * In this case, each page of a buffer must be kmapped/kunmapped + * in order to have a virtual address for __dma_sync(). This must + * not sleep so kmap_atomic()/kunmap_atomic() are used. + * + * Note: yes, it is possible and correct to have a buffer extend + * beyond the first page. + */ +static inline void __dma_sync_page_highmem(struct page *page, + unsigned long offset, size_t size, int direction) +{ + size_t seg_size = min((size_t)(PAGE_SIZE - offset), size); + size_t cur_size = seg_size; + unsigned long flags, start, seg_offset = offset; + int nr_segs = 1 + ((size - seg_size) + PAGE_SIZE - 1)/PAGE_SIZE; + int seg_nr = 0; + + local_irq_save(flags); + + do { + start = (unsigned long)kmap_atomic(page + seg_nr, + KM_PPC_SYNC_PAGE) + seg_offset; + + /* Sync this buffer segment */ + __dma_sync((void *)start, seg_size, direction); + kunmap_atomic((void *)start, KM_PPC_SYNC_PAGE); + seg_nr++; + + /* Calculate next buffer segment size */ + seg_size = min((size_t)PAGE_SIZE, size - cur_size); + + /* Add the segment size to our running total */ + cur_size += seg_size; + seg_offset = 0; + } while (seg_nr < nr_segs); + + local_irq_restore(flags); +} +#endif /* CONFIG_HIGHMEM */ + +/* + * __dma_sync_page makes memory consistent. 
identical to __dma_sync, but + * takes a struct page instead of a virtual address + */ +void __dma_sync_page(struct page *page, unsigned long offset, + size_t size, int direction) +{ +#ifdef CONFIG_HIGHMEM + __dma_sync_page_highmem(page, offset, size, direction); +#else + unsigned long start = (unsigned long)page_address(page) + offset; + __dma_sync((void *)start, size, direction); +#endif +} +EXPORT_SYMBOL(__dma_sync_page); diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c index 666a5e8a5be..3de6a0d9382 100644 --- a/arch/powerpc/mm/init_32.c +++ b/arch/powerpc/mm/init_32.c @@ -168,12 +168,8 @@ void __init MMU_init(void) ppc_md.progress("MMU:mapin", 0x301); mapin_ram(); -#ifdef CONFIG_HIGHMEM - ioremap_base = PKMAP_BASE; -#else - ioremap_base = 0xfe000000UL; /* for now, could be 0xfffff000 */ -#endif /* CONFIG_HIGHMEM */ - ioremap_bot = ioremap_base; + /* Initialize early top-down ioremap allocator */ + ioremap_bot = IOREMAP_TOP; /* Map in I/O resources */ if (ppc_md.progress) diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index f668fa9ba80..579382c163a 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -57,7 +57,7 @@ int init_bootmem_done; int mem_init_done; -unsigned long memory_limit; +phys_addr_t memory_limit; #ifdef CONFIG_HIGHMEM pte_t *kmap_pte; @@ -380,6 +380,23 @@ void __init mem_init(void) bsssize >> 10, initsize >> 10); +#ifdef CONFIG_PPC32 + pr_info("Kernel virtual memory layout:\n"); + pr_info(" * 0x%08lx..0x%08lx : fixmap\n", FIXADDR_START, FIXADDR_TOP); +#ifdef CONFIG_HIGHMEM + pr_info(" * 0x%08lx..0x%08lx : highmem PTEs\n", + PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP)); +#endif /* CONFIG_HIGHMEM */ +#ifdef CONFIG_NOT_COHERENT_CACHE + pr_info(" * 0x%08lx..0x%08lx : consistent mem\n", + IOREMAP_TOP, IOREMAP_TOP + CONFIG_CONSISTENT_SIZE); +#endif /* CONFIG_NOT_COHERENT_CACHE */ + pr_info(" * 0x%08lx..0x%08lx : early ioremap\n", + ioremap_bot, IOREMAP_TOP); + pr_info(" * 0x%08lx..0x%08lx : vmalloc & ioremap\n", + VMALLOC_START, VMALLOC_END); +#endif /* CONFIG_PPC32 */ + mem_init_done = 1; } diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c index a70e311bd45..030d0005b4d 100644 --- a/arch/powerpc/mm/mmu_context_nohash.c +++ b/arch/powerpc/mm/mmu_context_nohash.c @@ -127,12 +127,12 @@ static unsigned int steal_context_up(unsigned int id) pr_debug("[%d] steal context %d from mm @%p\n", cpu, id, mm); - /* Mark this mm has having no context anymore */ - mm->context.id = MMU_NO_CONTEXT; - /* Flush the TLB for that context */ local_flush_tlb_mm(mm); + /* Mark this mm has having no context anymore */ + mm->context.id = MMU_NO_CONTEXT; + /* XXX This clear should ultimately be part of local_flush_tlb_mm */ __clear_bit(id, stale_map[cpu]); diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c index f5c6fd42265..ae1d67cc090 100644 --- a/arch/powerpc/mm/pgtable.c +++ b/arch/powerpc/mm/pgtable.c @@ -219,7 +219,8 @@ int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address, entry = do_dcache_icache_coherency(entry); changed = !pte_same(*(ptep), entry); if (changed) { - assert_pte_locked(vma->vm_mm, address); + if (!(vma->vm_flags & VM_HUGETLB)) + assert_pte_locked(vma->vm_mm, address); __ptep_set_access_flags(ptep, entry); flush_tlb_page_nohash(vma, address); } diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c index 430d0908fa5..5422169626b 100644 --- a/arch/powerpc/mm/pgtable_32.c +++ b/arch/powerpc/mm/pgtable_32.c @@ -399,8 +399,6 @@ void kernel_map_pages(struct 
page *page, int numpages, int enable) #endif /* CONFIG_DEBUG_PAGEALLOC */ static int fixmaps; -unsigned long FIXADDR_TOP = (-PAGE_SIZE); -EXPORT_SYMBOL(FIXADDR_TOP); void __set_fixmap (enum fixed_addresses idx, phys_addr_t phys, pgprot_t flags) { diff --git a/arch/powerpc/oprofile/op_model_power4.c b/arch/powerpc/oprofile/op_model_power4.c index 3e3d91f536e..80774092db7 100644 --- a/arch/powerpc/oprofile/op_model_power4.c +++ b/arch/powerpc/oprofile/op_model_power4.c @@ -26,6 +26,7 @@ static unsigned long reset_value[OP_MAX_COUNTER]; static int oprofile_running; +static int use_slot_nums; /* mmcr values are set in power4_reg_setup, used in power4_cpu_setup */ static u32 mmcr0_val; @@ -61,6 +62,12 @@ static int power4_reg_setup(struct op_counter_config *ctr, else mmcr0_val |= MMCR0_PROBLEM_DISABLE; + if (__is_processor(PV_POWER4) || __is_processor(PV_POWER4p) || + __is_processor(PV_970) || __is_processor(PV_970FX) || + __is_processor(PV_970MP) || __is_processor(PV_970GX) || + __is_processor(PV_POWER5) || __is_processor(PV_POWER5p)) + use_slot_nums = 1; + return 0; } @@ -206,7 +213,7 @@ static unsigned long get_pc(struct pt_regs *regs) mmcra = mfspr(SPRN_MMCRA); - if (mmcra & MMCRA_SAMPLE_ENABLE) { + if (use_slot_nums && (mmcra & MMCRA_SAMPLE_ENABLE)) { slot = ((mmcra & MMCRA_SLOT) >> MMCRA_SLOT_SHIFT); if (slot > 1) pc += 4 * (slot - 1); diff --git a/arch/powerpc/platforms/40x/Kconfig b/arch/powerpc/platforms/40x/Kconfig index 14e027f5be6..f39c953d535 100644 --- a/arch/powerpc/platforms/40x/Kconfig +++ b/arch/powerpc/platforms/40x/Kconfig @@ -153,6 +153,7 @@ config 405GPR config XILINX_VIRTEX bool + select DEFAULT_UIMAGE config XILINX_VIRTEX_II_PRO bool diff --git a/arch/powerpc/platforms/44x/Kconfig b/arch/powerpc/platforms/44x/Kconfig index bf5c7ff2e6e..0d83a6a0397 100644 --- a/arch/powerpc/platforms/44x/Kconfig +++ b/arch/powerpc/platforms/44x/Kconfig @@ -246,6 +246,7 @@ config IBM440EP_ERR42 # Xilinx specific config options. config XILINX_VIRTEX bool + select DEFAULT_UIMAGE # Xilinx Virtex 5 FXT FPGA architecture, selected by a Xilinx board above config XILINX_VIRTEX_5_FXT diff --git a/arch/powerpc/platforms/cell/ras.c b/arch/powerpc/platforms/cell/ras.c index 5f961c464cc..296b5268754 100644 --- a/arch/powerpc/platforms/cell/ras.c +++ b/arch/powerpc/platforms/cell/ras.c @@ -122,12 +122,23 @@ static int __init cbe_ptcal_enable_on_node(int nid, int order) area->nid = nid; area->order = order; - area->pages = alloc_pages_node(area->nid, GFP_KERNEL, area->order); + area->pages = alloc_pages_node(area->nid, GFP_KERNEL | GFP_THISNODE, + area->order); - if (!area->pages) + if (!area->pages) { + printk(KERN_WARNING "%s: no page on node %d\n", + __func__, area->nid); goto out_free_area; + } - addr = __pa(page_address(area->pages)); + /* + * We move the ptcal area to the middle of the allocated + * page, in order to avoid prefetches in memcpy and similar + * functions stepping on it. 
+ */ + addr = __pa(page_address(area->pages)) + (PAGE_SIZE >> 1); + printk(KERN_DEBUG "%s: enabling PTCAL on node %d address=0x%016lx\n", + __func__, area->nid, addr); ret = -EIO; if (rtas_call(ptcal_start_tok, 3, 1, NULL, area->nid, diff --git a/arch/powerpc/platforms/iseries/Kconfig b/arch/powerpc/platforms/iseries/Kconfig index 647e8778743..47a20cfb448 100644 --- a/arch/powerpc/platforms/iseries/Kconfig +++ b/arch/powerpc/platforms/iseries/Kconfig @@ -17,6 +17,7 @@ config VIODASD config VIOCD tristate "iSeries Virtual I/O CD support" + depends on BLOCK select VIOPATH help If you are running Linux on an IBM iSeries system and you want to diff --git a/arch/powerpc/platforms/iseries/iommu.c b/arch/powerpc/platforms/iseries/iommu.c index ff43f1fd834..40219823d9b 100644 --- a/arch/powerpc/platforms/iseries/iommu.c +++ b/arch/powerpc/platforms/iseries/iommu.c @@ -174,9 +174,10 @@ static struct iommu_table *iommu_table_find(struct iommu_table * tbl) } -void iommu_devnode_init_iSeries(struct pci_dev *pdev, struct device_node *dn) +static void pci_dma_dev_setup_iseries(struct pci_dev *pdev) { struct iommu_table *tbl; + struct device_node *dn = pdev->sysdata; struct pci_dn *pdn = PCI_DN(dn); const u32 *lsn = of_get_property(dn, "linux,logical-slot-number", NULL); @@ -194,6 +195,8 @@ void iommu_devnode_init_iSeries(struct pci_dev *pdev, struct device_node *dn) kfree(tbl); pdev->dev.archdata.dma_data = pdn->iommu_table; } +#else +#define pci_dma_dev_setup_iseries NULL #endif static struct iommu_table veth_iommu_table; @@ -251,5 +254,6 @@ void iommu_init_early_iSeries(void) ppc_md.tce_build = tce_build_iSeries; ppc_md.tce_free = tce_free_iSeries; + ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_iseries; set_pci_dma_ops(&dma_iommu_ops); } diff --git a/arch/powerpc/platforms/iseries/pci.c b/arch/powerpc/platforms/iseries/pci.c index 02a634faedb..21cddc30220 100644 --- a/arch/powerpc/platforms/iseries/pci.c +++ b/arch/powerpc/platforms/iseries/pci.c @@ -444,7 +444,6 @@ void __init iSeries_pcibios_fixup_resources(struct pci_dev *pdev) pdev->sysdata = node; allocate_device_bars(pdev); iseries_device_information(pdev, bus, *sub_bus); - iommu_devnode_init_iSeries(pdev, node); } /* diff --git a/arch/powerpc/platforms/maple/pci.c b/arch/powerpc/platforms/maple/pci.c index 301855263b8..04296ffff8b 100644 --- a/arch/powerpc/platforms/maple/pci.c +++ b/arch/powerpc/platforms/maple/pci.c @@ -592,3 +592,17 @@ int maple_pci_get_legacy_ide_irq(struct pci_dev *pdev, int channel) } return irq; } + +static void __devinit quirk_ipr_msi(struct pci_dev *dev) +{ + /* Something prevents MSIs from the IPR from working on Bimini, + * and the driver has no smarts to recover. So disable MSI + * on it for now. 
*/ + + if (machine_is(maple)) { + dev->no_msi = 1; + dev_info(&dev->dev, "Quirk disabled MSI\n"); + } +} +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, + quirk_ipr_msi); diff --git a/arch/powerpc/sysdev/fsl_soc.c b/arch/powerpc/sysdev/fsl_soc.c index afe8dbc964a..5c64ccd402e 100644 --- a/arch/powerpc/sysdev/fsl_soc.c +++ b/arch/powerpc/sysdev/fsl_soc.c @@ -208,52 +208,6 @@ static int __init of_add_fixed_phys(void) arch_initcall(of_add_fixed_phys); #endif /* CONFIG_FIXED_PHY */ -#ifdef CONFIG_PPC_83xx -static int __init mpc83xx_wdt_init(void) -{ - struct resource r; - struct device_node *np; - struct platform_device *dev; - u32 freq = fsl_get_sys_freq(); - int ret; - - np = of_find_compatible_node(NULL, "watchdog", "mpc83xx_wdt"); - - if (!np) { - ret = -ENODEV; - goto nodev; - } - - memset(&r, 0, sizeof(r)); - - ret = of_address_to_resource(np, 0, &r); - if (ret) - goto err; - - dev = platform_device_register_simple("mpc83xx_wdt", 0, &r, 1); - if (IS_ERR(dev)) { - ret = PTR_ERR(dev); - goto err; - } - - ret = platform_device_add_data(dev, &freq, sizeof(freq)); - if (ret) - goto unreg; - - of_node_put(np); - return 0; - -unreg: - platform_device_unregister(dev); -err: - of_node_put(np); -nodev: - return ret; -} - -arch_initcall(mpc83xx_wdt_init); -#endif - static enum fsl_usb2_phy_modes determine_usb_phy(const char *phy_type) { if (!phy_type) diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c index 21b95670159..0efc12d1a3d 100644 --- a/arch/powerpc/sysdev/mpic.c +++ b/arch/powerpc/sysdev/mpic.c @@ -1057,13 +1057,6 @@ struct mpic * __init mpic_alloc(struct device_node *node, memset(mpic, 0, sizeof(struct mpic)); mpic->name = name; - mpic->irqhost = irq_alloc_host(node, IRQ_HOST_MAP_LINEAR, - isu_size, &mpic_host_ops, - flags & MPIC_LARGE_VECTORS ? 2048 : 256); - if (mpic->irqhost == NULL) - return NULL; - - mpic->irqhost->host_data = mpic; mpic->hc_irq = mpic_irq_chip; mpic->hc_irq.typename = name; if (flags & MPIC_PRIMARY) @@ -1213,6 +1206,15 @@ struct mpic * __init mpic_alloc(struct device_node *node, mpic->isu_shift = 1 + __ilog2(mpic->isu_size - 1); mpic->isu_mask = (1 << mpic->isu_shift) - 1; + mpic->irqhost = irq_alloc_host(node, IRQ_HOST_MAP_LINEAR, + isu_size ? isu_size : mpic->num_sources, + &mpic_host_ops, + flags & MPIC_LARGE_VECTORS ? 2048 : 256); + if (mpic->irqhost == NULL) + return NULL; + + mpic->irqhost->host_data = mpic; + /* Display version */ switch (greg_feature & MPIC_GREG_FEATURE_VERSION_MASK) { case 1: diff --git a/arch/powerpc/sysdev/xilinx_intc.c b/arch/powerpc/sysdev/xilinx_intc.c index a22e1a2df1a..c658b413c9b 100644 --- a/arch/powerpc/sysdev/xilinx_intc.c +++ b/arch/powerpc/sysdev/xilinx_intc.c @@ -41,8 +41,32 @@ static struct irq_host *master_irqhost; +#define XILINX_INTC_MAXIRQS (32) + +/* The following table allows the interrupt type, edge or level, + * to be cached after being read from the device tree until the interrupt + * is mapped + */ +static int xilinx_intc_typetable[XILINX_INTC_MAXIRQS]; + +/* Map the interrupt type from the device tree to the interrupt types + * used by the interrupt subsystem + */ +static unsigned char xilinx_intc_map_senses[] = { + IRQ_TYPE_EDGE_RISING, + IRQ_TYPE_EDGE_FALLING, + IRQ_TYPE_LEVEL_HIGH, + IRQ_TYPE_LEVEL_LOW, +}; + /* - * IRQ Chip operations + * The interrupt controller is setup such that it doesn't work well with + * the level interrupt handler in the kernel because the handler acks the + * interrupt before calling the application interrupt handler. 
To deal with + * that, we use 2 different irq chips so that different functions can be + * used for level and edge type interrupts. + * + * IRQ Chip common (across level and edge) operations */ static void xilinx_intc_mask(unsigned int virq) { @@ -52,15 +76,54 @@ static void xilinx_intc_mask(unsigned int virq) out_be32(regs + XINTC_CIE, 1 << irq); } -static void xilinx_intc_unmask(unsigned int virq) +static int xilinx_intc_set_type(unsigned int virq, unsigned int flow_type) +{ + struct irq_desc *desc = get_irq_desc(virq); + + desc->status &= ~(IRQ_TYPE_SENSE_MASK | IRQ_LEVEL); + desc->status |= flow_type & IRQ_TYPE_SENSE_MASK; + if (flow_type & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) + desc->status |= IRQ_LEVEL; + return 0; +} + +/* + * IRQ Chip level operations + */ +static void xilinx_intc_level_unmask(unsigned int virq) { int irq = virq_to_hw(virq); void * regs = get_irq_chip_data(virq); pr_debug("unmask: %d\n", irq); out_be32(regs + XINTC_SIE, 1 << irq); + + /* ack level irqs because they can't be acked during + * ack function since the handle_level_irq function + * acks the irq before calling the inerrupt handler + */ + out_be32(regs + XINTC_IAR, 1 << irq); } -static void xilinx_intc_ack(unsigned int virq) +static struct irq_chip xilinx_intc_level_irqchip = { + .typename = "Xilinx Level INTC", + .mask = xilinx_intc_mask, + .mask_ack = xilinx_intc_mask, + .unmask = xilinx_intc_level_unmask, + .set_type = xilinx_intc_set_type, +}; + +/* + * IRQ Chip edge operations + */ +static void xilinx_intc_edge_unmask(unsigned int virq) +{ + int irq = virq_to_hw(virq); + void *regs = get_irq_chip_data(virq); + pr_debug("unmask: %d\n", irq); + out_be32(regs + XINTC_SIE, 1 << irq); +} + +static void xilinx_intc_edge_ack(unsigned int virq) { int irq = virq_to_hw(virq); void * regs = get_irq_chip_data(virq); @@ -68,27 +131,60 @@ static void xilinx_intc_ack(unsigned int virq) out_be32(regs + XINTC_IAR, 1 << irq); } -static struct irq_chip xilinx_intc_irqchip = { - .typename = "Xilinx INTC", +static struct irq_chip xilinx_intc_edge_irqchip = { + .typename = "Xilinx Edge INTC", .mask = xilinx_intc_mask, - .unmask = xilinx_intc_unmask, - .ack = xilinx_intc_ack, + .unmask = xilinx_intc_edge_unmask, + .ack = xilinx_intc_edge_ack, + .set_type = xilinx_intc_set_type, }; /* * IRQ Host operations */ + +/** + * xilinx_intc_xlate - translate virq# from device tree interrupts property + */ +static int xilinx_intc_xlate(struct irq_host *h, struct device_node *ct, + u32 *intspec, unsigned int intsize, + irq_hw_number_t *out_hwirq, + unsigned int *out_flags) +{ + if ((intsize < 2) || (intspec[0] >= XILINX_INTC_MAXIRQS)) + return -EINVAL; + + /* keep a copy of the interrupt type til the interrupt is mapped + */ + xilinx_intc_typetable[intspec[0]] = xilinx_intc_map_senses[intspec[1]]; + + /* Xilinx uses 2 interrupt entries, the 1st being the h/w + * interrupt number, the 2nd being the interrupt type, edge or level + */ + *out_hwirq = intspec[0]; + *out_flags = xilinx_intc_map_senses[intspec[1]]; + + return 0; +} static int xilinx_intc_map(struct irq_host *h, unsigned int virq, irq_hw_number_t irq) { set_irq_chip_data(virq, h->host_data); - set_irq_chip_and_handler(virq, &xilinx_intc_irqchip, handle_level_irq); - set_irq_type(virq, IRQ_TYPE_NONE); + + if (xilinx_intc_typetable[irq] == IRQ_TYPE_LEVEL_HIGH || + xilinx_intc_typetable[irq] == IRQ_TYPE_LEVEL_LOW) { + set_irq_chip_and_handler(virq, &xilinx_intc_level_irqchip, + handle_level_irq); + } else { + set_irq_chip_and_handler(virq, &xilinx_intc_edge_irqchip, + 
handle_edge_irq); + } return 0; } static struct irq_host_ops xilinx_intc_ops = { .map = xilinx_intc_map, + .xlate = xilinx_intc_xlate, }; struct irq_host * __init @@ -116,7 +212,8 @@ xilinx_intc_init(struct device_node *np) out_be32(regs + XINTC_MER, 0x3UL); /* Turn on the Master Enable. */ /* Allocate and initialize an irq_host structure. */ - irq = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, 32, &xilinx_intc_ops, -1); + irq = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, XILINX_INTC_MAXIRQS, + &xilinx_intc_ops, -1); if (!irq) panic(__FILE__ ": Cannot allocate IRQ host\n"); irq->host_data = regs; diff --git a/arch/sh/boards/board-ap325rxa.c b/arch/sh/boards/board-ap325rxa.c index 39e46919df1..f2a29641b6a 100644 --- a/arch/sh/boards/board-ap325rxa.c +++ b/arch/sh/boards/board-ap325rxa.c @@ -263,6 +263,9 @@ static int camera_probe(void) struct i2c_msg msg; int ret; + if (!a) + return -ENODEV; + camera_power(1); msg.addr = 0x6e; msg.buf = camera_ncm03j_magic; diff --git a/arch/sh/include/asm/flat.h b/arch/sh/include/asm/flat.h index d3b2b4f109e..5d84df5e27f 100644 --- a/arch/sh/include/asm/flat.h +++ b/arch/sh/include/asm/flat.h @@ -12,7 +12,6 @@ #ifndef __ASM_SH_FLAT_H #define __ASM_SH_FLAT_H -#define flat_stack_align(sp) /* nothing needed */ #define flat_argvp_envp_on_stack() 0 #define flat_old_ram_flag(flags) (flags) #define flat_reloc_valid(reloc, size) ((reloc) <= (size)) diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h index 425c2f9be6d..d42e393078c 100644 --- a/arch/sparc/include/asm/elf_64.h +++ b/arch/sparc/include/asm/elf_64.h @@ -208,8 +208,9 @@ do { unsigned long new_flags = current_thread_info()->flags; \ else \ clear_thread_flag(TIF_ABI_PENDING); \ /* flush_thread will update pgd cache */ \ - if (current->personality != PER_LINUX32) \ - set_personality(PER_LINUX); \ + if (personality(current->personality) != PER_LINUX32) \ + set_personality(PER_LINUX | \ + (current->personality & (~PER_MASK))); \ } while (0) #endif /* !(__ASM_SPARC64_ELF_H) */ diff --git a/arch/sparc/lib/csum_copy_from_user.S b/arch/sparc/lib/csum_copy_from_user.S index a22eddbe5db..e0304e6a224 100644 --- a/arch/sparc/lib/csum_copy_from_user.S +++ b/arch/sparc/lib/csum_copy_from_user.S @@ -5,7 +5,7 @@ #define EX_LD(x) \ 98: x; \ - .section .fixup; \ + .section .fixup, "ax"; \ .align 4; \ 99: retl; \ mov -1, %o0; \ diff --git a/arch/sparc/lib/csum_copy_to_user.S b/arch/sparc/lib/csum_copy_to_user.S index d5b12f441f0..afd01acc587 100644 --- a/arch/sparc/lib/csum_copy_to_user.S +++ b/arch/sparc/lib/csum_copy_to_user.S @@ -5,7 +5,7 @@ #define EX_ST(x) \ 98: x; \ - .section .fixup; \ + .section .fixup,"ax"; \ .align 4; \ 99: retl; \ mov -1, %o0; \ diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index df9e885eee1..a6efe0a2e9a 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -498,6 +498,19 @@ config PARAVIRT over full virtualization. However, when run without a hypervisor the kernel is theoretically slower and slightly larger. +config PARAVIRT_SPINLOCKS + bool "Paravirtualization layer for spinlocks" + depends on PARAVIRT && SMP && EXPERIMENTAL + ---help--- + Paravirtualized spinlocks allow a pvops backend to replace the + spinlock implementation with something virtualization-friendly + (for example, block the virtual CPU rather than spinning). + + Unfortunately the downside is an up to 5% performance hit on + native kernels, with various workloads. + + If you are unsure how to answer this question, answer N. 
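The mechanism behind this option: the spinlock fast paths are routed through a structure of function pointers (pv_lock_ops, which the later hunks of this series guard with the same config symbol) that a hypervisor backend such as Xen can repoint, typically to an implementation that blocks or yields the virtual CPU instead of spinning against a lock holder that has been descheduled. A toy, userspace-only sketch of the idea, not the kernel's actual pv_lock_ops layout:

struct toy_spinlock {
	int locked;
};

struct toy_lock_ops {
	void (*lock)(struct toy_spinlock *);
	void (*unlock)(struct toy_spinlock *);
};

/* native backend: plain test-and-set spinning (gcc atomic builtins) */
static void native_lock(struct toy_spinlock *l)
{
	while (__sync_lock_test_and_set(&l->locked, 1))
		;				/* spin */
}

static void native_unlock(struct toy_spinlock *l)
{
	__sync_lock_release(&l->locked);
}

/* a paravirt backend would install different pointers here */
static struct toy_lock_ops lock_ops = { native_lock, native_unlock };

static inline void toy_spin_lock(struct toy_spinlock *l)
{
	lock_ops.lock(l);
}

static inline void toy_spin_unlock(struct toy_spinlock *l)
{
	lock_ops.unlock(l);
}

The performance note in the help text presumably reflects the cost of that extra indirection on bare metal, which is why the rest of this series makes the indirection compile out unless PARAVIRT_SPINLOCKS is selected.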
+ config PARAVIRT_CLOCK bool default n diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/boot/compressed/relocs.c index 857e492c571..bbeb0c3fbd9 100644 --- a/arch/x86/boot/compressed/relocs.c +++ b/arch/x86/boot/compressed/relocs.c @@ -504,8 +504,11 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym)) if (sym->st_shndx == SHN_ABS) { continue; } - if (r_type == R_386_PC32) { - /* PC relative relocations don't need to be adjusted */ + if (r_type == R_386_NONE || r_type == R_386_PC32) { + /* + * NONE can be ignored and and PC relative + * relocations don't need to be adjusted. + */ } else if (r_type == R_386_32) { /* Visit relocations that need to be adjusted */ diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c index 5054c2ddd1a..74b3d2ba84e 100644 --- a/arch/x86/boot/memory.c +++ b/arch/x86/boot/memory.c @@ -17,11 +17,6 @@ #define SMAP 0x534d4150 /* ASCII "SMAP" */ -struct e820_ext_entry { - struct e820entry std; - u32 ext_flags; -} __attribute__((packed)); - static int detect_memory_e820(void) { int count = 0; @@ -29,13 +24,21 @@ static int detect_memory_e820(void) u32 size, id, edi; u8 err; struct e820entry *desc = boot_params.e820_map; - static struct e820_ext_entry buf; /* static so it is zeroed */ + static struct e820entry buf; /* static so it is zeroed */ /* - * Set this here so that if the BIOS doesn't change this field - * but still doesn't change %ecx, we're still okay... + * Note: at least one BIOS is known which assumes that the + * buffer pointed to by one e820 call is the same one as + * the previous call, and only changes modified fields. Therefore, + * we use a temporary buffer and copy the results entry by entry. + * + * This routine deliberately does not try to account for + * ACPI 3+ extended attributes. This is because there are + * BIOSes in the field which report zero for the valid bit for + * all ranges, and we don't currently make any use of the + * other attribute bits. Revisit this if we see the extended + * attribute bits deployed in a meaningful way in the future. */ - buf.ext_flags = 1; do { size = sizeof buf; @@ -66,13 +69,7 @@ static int detect_memory_e820(void) break; } - /* ACPI 3.0 added the extended flags support. If bit 0 - in the extended flags is zero, we're supposed to simply - ignore the entry -- a backwards incompatible change! 
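The buffer this loop now copies entry by entry uses the classic 20-byte E820 descriptor; the ACPI 3.0 extension only appends the 32-bit ext_flags word visible in the struct e820_ext_entry deleted above. For reference, a sketch of the descriptor layout as defined in the x86 headers of this era (reproduced from memory, not part of this patch):

#include <linux/types.h>

struct e820entry {
	__u64 addr;		/* start of the memory range */
	__u64 size;		/* length of the range */
	__u32 type;		/* E820_RAM, E820_RESERVED, ... */
} __attribute__((packed));	/* 20 bytes as returned by INT 15h, AX=E820h */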
*/ - if (size > 20 && !(buf.ext_flags & 1)) - continue; - - *desc++ = buf.std; + *desc++ = buf; count++; } while (next && count < ARRAY_SIZE(boot_params.e820_map)); diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h index 378e3691c08..a53da004e08 100644 --- a/arch/x86/include/asm/paravirt.h +++ b/arch/x86/include/asm/paravirt.h @@ -1443,7 +1443,7 @@ u64 _paravirt_ident_64(u64); #define paravirt_nop ((void *)_paravirt_nop) -#ifdef CONFIG_SMP +#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS) static inline int __raw_spin_is_locked(struct raw_spinlock *lock) { diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h index aee103b26d0..02ecb30982a 100644 --- a/arch/x86/include/asm/percpu.h +++ b/arch/x86/include/asm/percpu.h @@ -82,22 +82,22 @@ do { \ case 1: \ asm(op "b %1,"__percpu_arg(0) \ : "+m" (var) \ - : "ri" ((T__)val)); \ + : "qi" ((T__)(val))); \ break; \ case 2: \ asm(op "w %1,"__percpu_arg(0) \ : "+m" (var) \ - : "ri" ((T__)val)); \ + : "ri" ((T__)(val))); \ break; \ case 4: \ asm(op "l %1,"__percpu_arg(0) \ : "+m" (var) \ - : "ri" ((T__)val)); \ + : "ri" ((T__)(val))); \ break; \ case 8: \ asm(op "q %1,"__percpu_arg(0) \ : "+m" (var) \ - : "re" ((T__)val)); \ + : "re" ((T__)(val))); \ break; \ default: __bad_percpu_size(); \ } \ @@ -109,7 +109,7 @@ do { \ switch (sizeof(var)) { \ case 1: \ asm(op "b "__percpu_arg(1)",%0" \ - : "=r" (ret__) \ + : "=q" (ret__) \ : "m" (var)); \ break; \ case 2: \ diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h index e304b66abee..624f133943e 100644 --- a/arch/x86/include/asm/ptrace.h +++ b/arch/x86/include/asm/ptrace.h @@ -187,14 +187,15 @@ static inline int v8086_mode(struct pt_regs *regs) /* * X86_32 CPUs don't save ss and esp if the CPU is already in kernel mode - * when it traps. So regs will be the current sp. + * when it traps. The previous stack will be directly underneath the saved + * registers, and 'sp/ss' won't even have been saved. Thus the '®s->sp'. * * This is valid only for kernel mode traps. 
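Back in the percpu.h hunk above, the byte-sized cases switch from the "r"/"ri" constraints to "q"/"qi" because on 32-bit x86 only %eax, %ebx, %ecx and %edx have byte sub-registers; with "r", gcc is free to pick %esi or %edi, which have no byte form in 32-bit mode. A minimal illustration of the constraint (hypothetical helper, not kernel code):

static inline void store_byte(unsigned char *p, unsigned char v)
{
	/*
	 * "q" restricts the register choice to a/b/c/d so the low-byte
	 * form (%al, %bl, ...) exists; "i" additionally allows an
	 * immediate operand, matching the "qi" used in the patch.
	 */
	asm("movb %1, %0" : "=m" (*p) : "qi" (v));
}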
*/ -static inline unsigned long kernel_trap_sp(struct pt_regs *regs) +static inline unsigned long kernel_stack_pointer(struct pt_regs *regs) { #ifdef CONFIG_X86_32 - return (unsigned long)regs; + return (unsigned long)(®s->sp); #else return regs->sp; #endif diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h index e5e6caffec8..b7e5db87639 100644 --- a/arch/x86/include/asm/spinlock.h +++ b/arch/x86/include/asm/spinlock.h @@ -172,7 +172,7 @@ static inline int __ticket_spin_is_contended(raw_spinlock_t *lock) return (((tmp >> TICKET_SHIFT) - tmp) & ((1 << TICKET_SHIFT) - 1)) > 1; } -#ifndef CONFIG_PARAVIRT +#ifndef CONFIG_PARAVIRT_SPINLOCKS static inline int __raw_spin_is_locked(raw_spinlock_t *lock) { @@ -206,7 +206,7 @@ static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock, __raw_spin_lock(lock); } -#endif +#endif /* CONFIG_PARAVIRT_SPINLOCKS */ static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock) { diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 145cce75cda..88d1bfc847d 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -89,7 +89,8 @@ obj-$(CONFIG_DEBUG_NX_TEST) += test_nx.o obj-$(CONFIG_VMI) += vmi_32.o vmiclock_32.o obj-$(CONFIG_KVM_GUEST) += kvm.o obj-$(CONFIG_KVM_CLOCK) += kvmclock.o -obj-$(CONFIG_PARAVIRT) += paravirt.o paravirt_patch_$(BITS).o paravirt-spinlocks.o +obj-$(CONFIG_PARAVIRT) += paravirt.o paravirt_patch_$(BITS).o +obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= paravirt-spinlocks.o obj-$(CONFIG_PARAVIRT_CLOCK) += pvclock.o obj-$(CONFIG_PCSPKR_PLATFORM) += pcspeaker.o diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c index 1c11b819f24..30294777557 100644 --- a/arch/x86/kernel/apic/es7000_32.c +++ b/arch/x86/kernel/apic/es7000_32.c @@ -254,7 +254,7 @@ static int parse_unisys_oem(char *oemptr) } #ifdef CONFIG_ACPI -static int find_unisys_acpi_oem_table(unsigned long *oem_addr) +static int __init find_unisys_acpi_oem_table(unsigned long *oem_addr) { struct acpi_table_header *header = NULL; struct es7000_oem_table *table; @@ -285,7 +285,7 @@ static int find_unisys_acpi_oem_table(unsigned long *oem_addr) return 0; } -static void unmap_unisys_acpi_oem_table(unsigned long oem_addr) +static void __init unmap_unisys_acpi_oem_table(unsigned long oem_addr) { if (!oem_addr) return; @@ -306,7 +306,7 @@ static int es7000_check_dsdt(void) static int es7000_acpi_ret; /* Hook from generic ACPI tables.c */ -static int es7000_acpi_madt_oem_check(char *oem_id, char *oem_table_id) +static int __init es7000_acpi_madt_oem_check(char *oem_id, char *oem_table_id) { unsigned long oem_addr = 0; int check_dsdt; @@ -717,7 +717,7 @@ struct apic apic_es7000_cluster = { .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, }; -struct apic apic_es7000 = { +struct apic __refdata apic_es7000 = { .name = "es7000", .probe = probe_es7000, diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index c1caefc82e6..77848d9fca6 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -114,6 +114,13 @@ DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = { } }; EXPORT_PER_CPU_SYMBOL_GPL(gdt_page); +static int __init x86_xsave_setup(char *s) +{ + setup_clear_cpu_cap(X86_FEATURE_XSAVE); + return 1; +} +__setup("noxsave", x86_xsave_setup); + #ifdef CONFIG_X86_32 static int cachesize_override __cpuinitdata = -1; static int disable_x86_serial_nr __cpuinitdata = 1; diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c 
b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c index 208ecf6643d..752e8c6b2c7 100644 --- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c +++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c @@ -550,7 +550,7 @@ static int __init acpi_cpufreq_early_init(void) return -ENOMEM; } for_each_possible_cpu(i) { - if (!alloc_cpumask_var_node( + if (!zalloc_cpumask_var_node( &per_cpu_ptr(acpi_perf_data, i)->shared_cpu_map, GFP_KERNEL, cpu_to_node(i))) { @@ -693,8 +693,8 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) if (perf->control_register.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE && policy->cpuinfo.transition_latency > 20 * 1000) { policy->cpuinfo.transition_latency = 20 * 1000; - printk_once(KERN_INFO "Capping off P-state tranision" - " latency at 20 uS\n"); + printk_once(KERN_INFO + "P-state transition latency capped at 20 uS\n"); } /* table init */ diff --git a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c index 6ac55bd341a..86961519372 100644 --- a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c +++ b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c @@ -168,6 +168,7 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c) case 0x0E: /* Core */ case 0x0F: /* Core Duo */ case 0x16: /* Celeron Core */ + case 0x1C: /* Atom */ p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS; return speedstep_get_frequency(SPEEDSTEP_CPU_PCORE); case 0x0D: /* Pentium M (Dothan) */ diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k7.c b/arch/x86/kernel/cpu/cpufreq/powernow-k7.c index 3c28ccd4974..d47c775eb0a 100644 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k7.c +++ b/arch/x86/kernel/cpu/cpufreq/powernow-k7.c @@ -168,10 +168,12 @@ static int check_powernow(void) return 1; } +#ifdef CONFIG_X86_POWERNOW_K7_ACPI static void invalidate_entry(unsigned int entry) { powernow_table[entry].frequency = CPUFREQ_ENTRY_INVALID; } +#endif static int get_ranges(unsigned char *pst) { @@ -320,7 +322,7 @@ static int powernow_acpi_init(void) goto err0; } - if (!alloc_cpumask_var(&acpi_processor_perf->shared_cpu_map, + if (!zalloc_cpumask_var(&acpi_processor_perf->shared_cpu_map, GFP_KERNEL)) { retval = -ENOMEM; goto err05; diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c index 4709ead2db5..cf52215d9eb 100644 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c +++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c @@ -649,6 +649,20 @@ static void print_basics(struct powernow_k8_data *data) data->batps); } +static u32 freq_from_fid_did(u32 fid, u32 did) +{ + u32 mhz = 0; + + if (boot_cpu_data.x86 == 0x10) + mhz = (100 * (fid + 0x10)) >> did; + else if (boot_cpu_data.x86 == 0x11) + mhz = (100 * (fid + 8)) >> did; + else + BUG(); + + return mhz * 1000; +} + static int fill_powernow_table(struct powernow_k8_data *data, struct pst_s *pst, u8 maxvid) { @@ -821,7 +835,7 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) { struct cpufreq_frequency_table *powernow_table; int ret_val = -ENODEV; - acpi_integer space_id; + acpi_integer control, status; if (acpi_processor_register_performance(&data->acpi_data, data->cpu)) { dprintk("register performance failed: bad ACPI data\n"); @@ -834,12 +848,13 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) goto err_out; } - space_id = data->acpi_data.control_register.space_id; - if ((space_id != ACPI_ADR_SPACE_FIXED_HARDWARE) || - (space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) { + control = data->acpi_data.control_register.space_id; + status = 
data->acpi_data.status_register.space_id; + + if ((control != ACPI_ADR_SPACE_FIXED_HARDWARE) || + (status != ACPI_ADR_SPACE_FIXED_HARDWARE)) { dprintk("Invalid control/status registers (%x - %x)\n", - data->acpi_data.control_register.space_id, - space_id); + control, status); goto err_out; } @@ -872,7 +887,7 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) /* notify BIOS that we exist */ acpi_processor_notify_smm(THIS_MODULE); - if (!alloc_cpumask_var(&data->acpi_data.shared_cpu_map, GFP_KERNEL)) { + if (!zalloc_cpumask_var(&data->acpi_data.shared_cpu_map, GFP_KERNEL)) { printk(KERN_ERR PFX "unable to alloc powernow_k8_data cpumask\n"); ret_val = -ENOMEM; @@ -923,8 +938,13 @@ static int fill_powernow_table_pstate(struct powernow_k8_data *data, powernow_table[i].index = index; - powernow_table[i].frequency = - data->acpi_data.states[i].core_frequency * 1000; + /* Frequency may be rounded for these */ + if (boot_cpu_data.x86 == 0x10 || boot_cpu_data.x86 == 0x11) { + powernow_table[i].frequency = + freq_from_fid_did(lo & 0x3f, (lo >> 6) & 7); + } else + powernow_table[i].frequency = + data->acpi_data.states[i].core_frequency * 1000; } return 0; } @@ -1215,13 +1235,16 @@ static int powernowk8_verify(struct cpufreq_policy *pol) return cpufreq_frequency_table_verify(pol, data->powernow_table); } +static const char ACPI_PSS_BIOS_BUG_MSG[] = + KERN_ERR FW_BUG PFX "No compatible ACPI _PSS objects found.\n" + KERN_ERR FW_BUG PFX "Try again with latest BIOS.\n"; + /* per CPU init entry point to the driver */ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) { struct powernow_k8_data *data; cpumask_t oldmask; int rc; - static int print_once; if (!cpu_online(pol->cpu)) return -ENODEV; @@ -1244,19 +1267,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) * an UP version, and is deprecated by AMD. */ if (num_online_cpus() != 1) { - /* - * Replace this one with print_once as soon as such a - * thing gets introduced - */ - if (!print_once) { - WARN_ONCE(1, KERN_ERR FW_BUG PFX "Your BIOS " - "does not provide ACPI _PSS objects " - "in a way that Linux understands. 
" - "Please report this to the Linux ACPI" - " maintainers and complain to your " - "BIOS vendor.\n"); - print_once++; - } + printk_once(ACPI_PSS_BIOS_BUG_MSG); goto err_out; } if (pol->cpu != 0) { diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c index c9f1fdc0283..55c831ed71c 100644 --- a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c +++ b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c @@ -471,7 +471,7 @@ static int centrino_target (struct cpufreq_policy *policy, if (unlikely(!alloc_cpumask_var(&saved_mask, GFP_KERNEL))) return -ENOMEM; - if (unlikely(!alloc_cpumask_var(&covered_cpus, GFP_KERNEL))) { + if (unlikely(!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))) { free_cpumask_var(saved_mask); return -ENOMEM; } diff --git a/arch/x86/kernel/cpu/mcheck/mce_64.c b/arch/x86/kernel/cpu/mcheck/mce_64.c index 6fb0b359d2a..09dd1d414fc 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_64.c +++ b/arch/x86/kernel/cpu/mcheck/mce_64.c @@ -1163,7 +1163,7 @@ static __init int mce_init_device(void) if (!mce_available(&boot_cpu_data)) return -EIO; - alloc_cpumask_var(&mce_device_initialized, GFP_KERNEL); + zalloc_cpumask_var(&mce_device_initialized, GFP_KERNEL); err = mce_init_banks(); if (err) diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c index 0b776c09aff..d21d4fb161f 100644 --- a/arch/x86/kernel/cpu/mtrr/generic.c +++ b/arch/x86/kernel/cpu/mtrr/generic.c @@ -275,7 +275,11 @@ static void __init print_mtrr_state(void) } printk(KERN_DEBUG "MTRR variable ranges %sabled:\n", mtrr_state.enabled & 2 ? "en" : "dis"); - high_width = ((size_or_mask ? ffs(size_or_mask) - 1 : 32) - (32 - PAGE_SHIFT) + 3) / 4; + if (size_or_mask & 0xffffffffUL) + high_width = ffs(size_or_mask & 0xffffffffUL) - 1; + else + high_width = ffs(size_or_mask>>32) + 32 - 1; + high_width = (high_width - (32 - PAGE_SHIFT) + 3) / 4; for (i = 0; i < num_var_ranges; ++i) { if (mtrr_state.var_ranges[i].mask_lo & (1 << 11)) printk(KERN_DEBUG " %u base %0*X%05X000 mask %0*X%05X000 %s\n", diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c index 18dfa30795c..b79c5533c42 100644 --- a/arch/x86/kernel/ftrace.c +++ b/arch/x86/kernel/ftrace.c @@ -442,7 +442,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) _ASM_EXTABLE(1b, 4b) _ASM_EXTABLE(2b, 4b) - : [old] "=r" (old), [faulted] "=r" (faulted) + : [old] "=&r" (old), [faulted] "=r" (faulted) : [parent] "r" (parent), [return_hooker] "r" (return_hooker) : "memory" ); diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c index eedfaebe106..b1f4dffb919 100644 --- a/arch/x86/kernel/kgdb.c +++ b/arch/x86/kernel/kgdb.c @@ -88,6 +88,7 @@ void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs) gdb_regs[GDB_SS] = __KERNEL_DS; gdb_regs[GDB_FS] = 0xFFFF; gdb_regs[GDB_GS] = 0xFFFF; + gdb_regs[GDB_SP] = (int)®s->sp; #else gdb_regs[GDB_R8] = regs->r8; gdb_regs[GDB_R9] = regs->r9; @@ -100,8 +101,8 @@ void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs) gdb_regs32[GDB_PS] = regs->flags; gdb_regs32[GDB_CS] = regs->cs; gdb_regs32[GDB_SS] = regs->ss; -#endif gdb_regs[GDB_SP] = regs->sp; +#endif } /** diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index 8e45f446488..9faf43bea33 100644 --- a/arch/x86/kernel/paravirt.c +++ b/arch/x86/kernel/paravirt.c @@ -134,7 +134,9 @@ static void *get_call_destination(u8 type) .pv_irq_ops = pv_irq_ops, .pv_apic_ops = pv_apic_ops, .pv_mmu_ops = pv_mmu_ops, +#ifdef 
CONFIG_PARAVIRT_SPINLOCKS .pv_lock_ops = pv_lock_ops, +#endif }; return *((void **)&tmpl + type); } diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index 1340dad417f..667188e0b5a 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c @@ -232,6 +232,14 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "Dell DXP061"), }, }, + { /* Handle problems with rebooting on Sony VGN-Z540N */ + .callback = set_bios_reboot, + .ident = "Sony VGN-Z540N", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "VGN-Z540N"), + }, + }, { } }; diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index 3a97a4cf187..8f0e13be36b 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c @@ -160,8 +160,10 @@ static ssize_t __init setup_pcpu_remap(size_t static_size) /* * If large page isn't supported, there's no benefit in doing * this. Also, on non-NUMA, embedding is better. + * + * NOTE: disabled for now. */ - if (!cpu_has_pse || !pcpu_need_numa()) + if (true || !cpu_has_pse || !pcpu_need_numa()) return -EINVAL; /* diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c index ed0c33761e6..8c7b03b0cfc 100644 --- a/arch/x86/kernel/tlb_uv.c +++ b/arch/x86/kernel/tlb_uv.c @@ -832,7 +832,7 @@ static int __init uv_bau_init(void) return 0; for_each_possible_cpu(cur_cpu) - alloc_cpumask_var_node(&per_cpu(uv_flush_tlb_mask, cur_cpu), + zalloc_cpumask_var_node(&per_cpu(uv_flush_tlb_mask, cur_cpu), GFP_KERNEL, cpu_to_node(cur_cpu)); uv_bau_retry_limit = 1; diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index b6caf1329b1..32cf11e5728 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -2897,8 +2897,7 @@ static int kvm_pv_mmu_write(struct kvm_vcpu *vcpu, static int kvm_pv_mmu_flush_tlb(struct kvm_vcpu *vcpu) { - kvm_x86_ops->tlb_flush(vcpu); - set_bit(KVM_REQ_MMU_SYNC, &vcpu->requests); + kvm_set_cr3(vcpu, vcpu->arch.cr3); return 1; } diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 1821c207819..1f8510c51d6 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -411,7 +411,6 @@ static __init int svm_hardware_setup(void) iopm_va = page_address(iopm_pages); memset(iopm_va, 0xff, PAGE_SIZE * (1 << IOPM_ALLOC_ORDER)); - clear_bit(0x80, iopm_va); /* allow direct access to PC debug port */ iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT; if (boot_cpu_has(X86_FEATURE_NX)) @@ -796,6 +795,11 @@ static void svm_get_segment(struct kvm_vcpu *vcpu, var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1; var->g = (s->attrib >> SVM_SELECTOR_G_SHIFT) & 1; + /* AMD's VMCB does not have an explicit unusable field, so emulate it + * for cross vendor migration purposes by "not present" + */ + var->unusable = !var->present || (var->type == 0); + switch (seg) { case VCPU_SREG_CS: /* @@ -827,8 +831,6 @@ static void svm_get_segment(struct kvm_vcpu *vcpu, var->type |= 0x1; break; } - - var->unusable = !var->present; } static int svm_get_cpl(struct kvm_vcpu *vcpu) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 7c1ce5ac613..3944e917e79 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -338,6 +338,9 @@ EXPORT_SYMBOL_GPL(kvm_lmsw); void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) { + unsigned long old_cr4 = vcpu->arch.cr4; + unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE; + if (cr4 & CR4_RESERVED_BITS) { printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n"); kvm_inject_gp(vcpu, 0); @@ -351,7 +354,8 @@ void 
kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) kvm_inject_gp(vcpu, 0); return; } - } else if (is_paging(vcpu) && !is_pae(vcpu) && (cr4 & X86_CR4_PAE) + } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE) + && ((cr4 ^ old_cr4) & pdptr_bits) && !load_pdptrs(vcpu, vcpu->arch.cr3)) { printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n"); kvm_inject_gp(vcpu, 0); @@ -1121,9 +1125,9 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) static int is_efer_nx(void) { - u64 efer; + unsigned long long efer = 0; - rdmsrl(MSR_EFER, efer); + rdmsrl_safe(MSR_EFER, &efer); return efer & EFER_NX; } @@ -1259,7 +1263,7 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, bit(X86_FEATURE_CMOV) | bit(X86_FEATURE_PSE36) | bit(X86_FEATURE_MMX) | bit(X86_FEATURE_FXSR) | bit(X86_FEATURE_SYSCALL) | - (bit(X86_FEATURE_NX) && is_efer_nx()) | + (is_efer_nx() ? bit(X86_FEATURE_NX) : 0) | #ifdef CONFIG_X86_64 bit(X86_FEATURE_LM) | #endif diff --git a/arch/x86/lguest/Makefile b/arch/x86/lguest/Makefile index 27f0c9ed7f6..94e0e54056a 100644 --- a/arch/x86/lguest/Makefile +++ b/arch/x86/lguest/Makefile @@ -1 +1,2 @@ obj-y := i386_head.o boot.o +CFLAGS_boot.o := $(call cc-option, -fno-stack-protector) diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c index ca7ec44bafc..33a93b41739 100644 --- a/arch/x86/lguest/boot.c +++ b/arch/x86/lguest/boot.c @@ -67,6 +67,7 @@ #include #include #include +#include #include /* for struct machine_ops */ /*G:010 Welcome to the Guest! @@ -1088,13 +1089,21 @@ __init void lguest_init(void) * lguest_init() where the rest of the fairly chaotic boot setup * occurs. */ + /* The stack protector is a weird thing where gcc places a canary + * value on the stack and then checks it on return. This file is + * compiled with -fno-stack-protector it, so we got this far without + * problems. The value of the canary is kept at offset 20 from the + * %gs register, so we need to set that up before calling C functions + * in other files. */ + setup_stack_canary_segment(0); + /* We could just call load_stack_canary_segment(), but we might as + * call switch_to_new_gdt() which loads the whole table and sets up + * the per-cpu segment descriptor register %fs as well. */ + switch_to_new_gdt(0); + /* As described in head_32.S, we map the first 128M of memory. */ max_pfn_mapped = (128*1024*1024) >> PAGE_SHIFT; - /* Load the %fs segment register (the per-cpu segment register) with - * the normal data segment to get through booting. */ - asm volatile ("mov %0, %%fs" : : "r" (__KERNEL_DS) : "memory"); - /* The Host<->Guest Switcher lives at the top of our address space, and * the Host told us how big it is when we made LGUEST_INIT hypercall: * it put the answer in lguest_data.reserve_mem */ diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c index 8f307d914c2..f46c340727b 100644 --- a/arch/x86/mm/hugetlbpage.c +++ b/arch/x86/mm/hugetlbpage.c @@ -26,12 +26,16 @@ static unsigned long page_table_shareable(struct vm_area_struct *svma, unsigned long sbase = saddr & PUD_MASK; unsigned long s_end = sbase + PUD_SIZE; + /* Allow segments to share if only one is marked locked */ + unsigned long vm_flags = vma->vm_flags & ~VM_LOCKED; + unsigned long svm_flags = svma->vm_flags & ~VM_LOCKED; + /* * match the virtual addresses, permission and the alignment of the * page table page. 
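Returning to the lguest hunk above: the reason the canary segment and then the full GDT (switch_to_new_gdt()) must be set up before calling into other C files is that gcc's i386 stack protector fetches the guard value through a segment override, %gs:20 according to the comment, in every instrumented function prologue. Conceptually, each such function behaves like the following sketch (self-contained stand-in; the real code reads %gs:20 and calls __stack_chk_fail()):

#include <string.h>

static unsigned long __guard = 0xdeadbeefUL;	/* stand-in for %gs:20 */

static void guard_fail(void)
{
	for (;;)
		;				/* the real handler panics */
}

void copy_name(char *dst, const char *src)
{
	unsigned long canary = __guard;		/* prologue: stash canary on the stack */
	char buf[64];

	strncpy(buf, src, sizeof(buf) - 1);
	buf[sizeof(buf) - 1] = '\0';
	strcpy(dst, buf);

	if (canary != __guard)			/* epilogue: canary overwritten? */
		guard_fail();
}

Until %gs points at a segment where offset 20 is readable, the first instrumented call could fault, which is why lguest's own boot.c is built with -fno-stack-protector and does the segment setup before anything else.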
*/ if (pmd_index(addr) != pmd_index(saddr) || - vma->vm_flags != svma->vm_flags || + vm_flags != svm_flags || sbase < svma->vm_start || svma->vm_end < s_end) return 0; diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index 797f9f107cb..e17efed088c 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c @@ -153,7 +153,7 @@ static void __cpa_flush_all(void *arg) */ __flush_tlb_all(); - if (cache && boot_cpu_data.x86_model >= 4) + if (cache && boot_cpu_data.x86 >= 4) wbinvd(); } @@ -208,20 +208,15 @@ static void cpa_flush_array(unsigned long *start, int numpages, int cache, int in_flags, struct page **pages) { unsigned int i, level; + unsigned long do_wbinvd = cache && numpages >= 1024; /* 4M threshold */ BUG_ON(irqs_disabled()); - on_each_cpu(__cpa_flush_range, NULL, 1); + on_each_cpu(__cpa_flush_all, (void *) do_wbinvd, 1); - if (!cache) + if (!cache || do_wbinvd) return; - /* 4M threshold */ - if (numpages >= 1024) { - if (boot_cpu_data.x86_model >= 4) - wbinvd(); - return; - } /* * We only need to flush on one CPU, * clflush is a MESI-coherent instruction that diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c index 04df67f8a7b..044897be021 100644 --- a/arch/x86/oprofile/backtrace.c +++ b/arch/x86/oprofile/backtrace.c @@ -76,9 +76,9 @@ void x86_backtrace(struct pt_regs * const regs, unsigned int depth) { struct frame_head *head = (struct frame_head *)frame_pointer(regs); - unsigned long stack = kernel_trap_sp(regs); if (!user_mode_vm(regs)) { + unsigned long stack = kernel_stack_pointer(regs); if (depth) dump_trace(NULL, regs, (unsigned long *)stack, 0, &backtrace_ops, &depth); diff --git a/arch/x86/pci/mmconfig-shared.c b/arch/x86/pci/mmconfig-shared.c index 5fa10bb9604..8766b0e216c 100644 --- a/arch/x86/pci/mmconfig-shared.c +++ b/arch/x86/pci/mmconfig-shared.c @@ -375,7 +375,7 @@ static acpi_status __init check_mcfg_resource(struct acpi_resource *res, if (!fixmem32) return AE_OK; if ((mcfg_res->start >= fixmem32->address) && - (mcfg_res->end <= (fixmem32->address + + (mcfg_res->end < (fixmem32->address + fixmem32->address_length))) { mcfg_res->flags = 1; return AE_CTRL_TERMINATE; @@ -392,7 +392,7 @@ static acpi_status __init check_mcfg_resource(struct acpi_resource *res, return AE_OK; if ((mcfg_res->start >= address.minimum) && - (mcfg_res->end <= (address.minimum + address.address_length))) { + (mcfg_res->end < (address.minimum + address.address_length))) { mcfg_res->flags = 1; return AE_CTRL_TERMINATE; } @@ -418,7 +418,7 @@ static int __init is_acpi_reserved(u64 start, u64 end, unsigned not_used) struct resource mcfg_res; mcfg_res.start = start; - mcfg_res.end = end; + mcfg_res.end = end - 1; mcfg_res.flags = 0; acpi_get_devices("PNP0C01", find_mboard_resource, &mcfg_res, NULL); diff --git a/arch/x86/xen/Makefile b/arch/x86/xen/Makefile index 3b767d03fd6..172438f86a0 100644 --- a/arch/x86/xen/Makefile +++ b/arch/x86/xen/Makefile @@ -9,5 +9,6 @@ obj-y := enlighten.o setup.o multicalls.o mmu.o irq.o \ time.o xen-asm.o xen-asm_$(BITS).o \ grant-table.o suspend.o -obj-$(CONFIG_SMP) += smp.o spinlock.o -obj-$(CONFIG_XEN_DEBUG_FS) += debugfs.o \ No newline at end of file +obj-$(CONFIG_SMP) += smp.o +obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= spinlock.o +obj-$(CONFIG_XEN_DEBUG_FS) += debugfs.o diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index e25a78e1113..fba55b1a402 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c @@ -42,6 +42,7 @@ #include #include #include +#include #include #include diff --git a/arch/x86/xen/xen-ops.h 
b/arch/x86/xen/xen-ops.h index 20139464943..ca6596b05d5 100644 --- a/arch/x86/xen/xen-ops.h +++ b/arch/x86/xen/xen-ops.h @@ -62,15 +62,26 @@ void xen_setup_vcpu_info_placement(void); #ifdef CONFIG_SMP void xen_smp_init(void); -void __init xen_init_spinlocks(void); -__cpuinit void xen_init_lock_cpu(int cpu); -void xen_uninit_lock_cpu(int cpu); - extern cpumask_var_t xen_cpu_initialized_map; #else static inline void xen_smp_init(void) {} #endif +#ifdef CONFIG_PARAVIRT_SPINLOCKS +void __init xen_init_spinlocks(void); +__cpuinit void xen_init_lock_cpu(int cpu); +void xen_uninit_lock_cpu(int cpu); +#else +static inline void xen_init_spinlocks(void) +{ +} +static inline void xen_init_lock_cpu(int cpu) +{ +} +static inline void xen_uninit_lock_cpu(int cpu) +{ +} +#endif /* Declare an asm function, along with symbols needed to make it inlineable */ diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig index fa6dc4dd3b1..ebe228d02b0 100644 --- a/arch/xtensa/Kconfig +++ b/arch/xtensa/Kconfig @@ -80,6 +80,7 @@ config XTENSA_VARIANT_S6000 bool "s6000 - Stretch software configurable processor" select VARIANT_IRQ_SWITCH select ARCH_REQUIRE_GPIOLIB + select XTENSA_CALIBRATE_CCOUNT endchoice config XTENSA_UNALIGNED_USER @@ -137,6 +138,8 @@ config PCI source "drivers/pci/Kconfig" +endmenu + menu "Platform options" choice @@ -153,8 +156,6 @@ config XTENSA_PLATFORM_ISS config XTENSA_PLATFORM_XT2000 bool "XT2000" - select XTENSA_CALIBRATE_CCOUNT - select PCI help XT2000 is the name of Tensilica's feature-rich emulation platform. This hardware is capable of running a full Linux distribution. @@ -192,8 +193,6 @@ config CMDLINE source "mm/Kconfig" -endmenu - config HOTPLUG bool "Support for hot-pluggable devices" help diff --git a/arch/xtensa/configs/s6105_defconfig b/arch/xtensa/configs/s6105_defconfig index 6e1deff4159..768bee00603 100644 --- a/arch/xtensa/configs/s6105_defconfig +++ b/arch/xtensa/configs/s6105_defconfig @@ -115,7 +115,7 @@ CONFIG_XTENSA_VARIANT_S6000=y CONFIG_PREEMPT=y # CONFIG_MATH_EMULATION is not set # CONFIG_HIGHMEM is not set -# CONFIG_XTENSA_CALIBRATE_CCOUNT is not set +CONFIG_XTENSA_CALIBRATE_CCOUNT=y CONFIG_SERIAL_CONSOLE=y # CONFIG_XTENSA_ISS_NETWORK is not set @@ -131,7 +131,6 @@ CONFIG_SERIAL_CONSOLE=y # CONFIG_XTENSA_PLATFORM_ISS is not set # CONFIG_XTENSA_PLATFORM_XT2000 is not set CONFIG_XTENSA_PLATFORM_S6105=y -CONFIG_XTENSA_CPU_CLOCK=300 CONFIG_GENERIC_CALIBRATE_DELAY=y CONFIG_CMDLINE_BOOL=y CONFIG_CMDLINE="console=ttyS1,38400 debug bootmem_debug loglevel=7" diff --git a/arch/xtensa/include/asm/checksum.h b/arch/xtensa/include/asm/checksum.h index f84d3f00774..e4d831a3077 100644 --- a/arch/xtensa/include/asm/checksum.h +++ b/arch/xtensa/include/asm/checksum.h @@ -113,7 +113,8 @@ static __inline__ __sum16 ip_fast_csum(const void *iph, unsigned int ihl) are modified, we must also specify them as outputs, or gcc will assume they contain their original values. 
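The "memory" clobber added just below guards against a different hazard: the asm also dereferences the pointers it is given, so without the clobber gcc may keep a freshly stored header word in a register, or reorder the store past the asm, and the checksum would cover stale data. A minimal, hypothetical illustration of the idiom, using the same xtensa constraints as the code above:

static inline unsigned int load_first_word(const unsigned int *p)
{
	unsigned int v;

	/*
	 * The "memory" clobber tells gcc this asm may read memory that
	 * is not listed as an operand, so stores to *p issued before
	 * the asm must actually be performed first.
	 */
	asm volatile("l32i %0, %1, 0" : "=a" (v) : "a" (p) : "memory");
	return v;
}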
*/ : "=r" (sum), "=r" (iph), "=r" (ihl), "=&r" (tmp), "=&r" (endaddr) - : "1" (iph), "2" (ihl)); + : "1" (iph), "2" (ihl) + : "memory"); return csum_fold(sum); } @@ -227,7 +228,8 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr, "1:\t" : "=r" (sum), "=&r" (__dummy) : "r" (saddr), "r" (daddr), - "r" (htonl(len)), "r" (htonl(proto)), "0" (sum)); + "r" (htonl(len)), "r" (htonl(proto)), "0" (sum) + : "memory"); return csum_fold(sum); } diff --git a/arch/xtensa/include/asm/timex.h b/arch/xtensa/include/asm/timex.h index b83a8181d44..053bc427210 100644 --- a/arch/xtensa/include/asm/timex.h +++ b/arch/xtensa/include/asm/timex.h @@ -39,9 +39,9 @@ #ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT extern unsigned long ccount_per_jiffy; -extern unsigned long ccount_nsec; +extern unsigned long nsec_per_ccount; #define CCOUNT_PER_JIFFY ccount_per_jiffy -#define NSEC_PER_CCOUNT ccount_nsec +#define NSEC_PER_CCOUNT nsec_per_ccount #else #define CCOUNT_PER_JIFFY (CONFIG_XTENSA_CPU_CLOCK*(1000000UL/HZ)) #define NSEC_PER_CCOUNT (1000UL / CONFIG_XTENSA_CPU_CLOCK) diff --git a/arch/xtensa/kernel/Makefile b/arch/xtensa/kernel/Makefile index 7419dbccf02..fe3186de6a3 100644 --- a/arch/xtensa/kernel/Makefile +++ b/arch/xtensa/kernel/Makefile @@ -4,15 +4,30 @@ extra-y := head.o vmlinux.lds - obj-y := align.o entry.o irq.o coprocessor.o process.o ptrace.o \ setup.o signal.o syscall.o time.o traps.o vectors.o platform.o \ pci-dma.o init_task.o io.o -## windowspill.o - obj-$(CONFIG_KGDB) += xtensa-stub.o obj-$(CONFIG_PCI) += pci.o obj-$(CONFIG_MODULES) += xtensa_ksyms.o module.o +# In the Xtensa architecture, assembly generates literals which must always +# precede the L32R instruction with a relative offset less than 256 kB. +# Therefore, the .text and .literal section must be combined in parenthesis +# in the linker script, such as: *(.literal .text). 
+# +# We need to post-process the generated vmlinux.lds scripts to convert +# *(xxx.text) to *(xxx.literal xxx.text) for the following text sections: +# .text .ref.text .*init.text .*exit.text .text.* +# +# Replicate rules in scripts/Makefile.build +sed-y = -e 's/(\(\.[a-z]*it\|\.ref\|\)\.text)/(\1.literal \1.text)/g' \ + -e 's/(\(\.text\.[a-z]*\))/(\1.literal \1)/g' + +quiet_cmd__cpp_lds_S = LDS $@ + cmd__cpp_lds_S = $(CPP) $(cpp_flags) -D__ASSEMBLY__ $< | sed $(sed-y) >$@ + +$(obj)/vmlinux.lds: $(src)/vmlinux.lds.S FORCE + $(call if_changed_dep,_cpp_lds_S) diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c index 9f0b71189e9..ba9ab934978 100644 --- a/arch/xtensa/kernel/traps.c +++ b/arch/xtensa/kernel/traps.c @@ -369,6 +369,18 @@ void show_regs(struct pt_regs * regs) regs->syscall); } +static __always_inline unsigned long *stack_pointer(struct task_struct *task) +{ + unsigned long *sp; + + if (!task || task == current) + __asm__ __volatile__ ("mov %0, a1\n" : "=a"(sp)); + else + sp = (unsigned long *)task->thread.sp; + + return sp; +} + void show_trace(struct task_struct *task, unsigned long *sp) { unsigned long a0, a1, pc; @@ -377,7 +389,7 @@ void show_trace(struct task_struct *task, unsigned long *sp) if (sp) a1 = (unsigned long)sp; else - a1 = task->thread.sp; + a1 = (unsigned long)stack_pointer(task); sp_start = a1 & ~(THREAD_SIZE-1); sp_end = sp_start + THREAD_SIZE; @@ -420,7 +432,7 @@ void show_stack(struct task_struct *task, unsigned long *sp) unsigned long *stack; if (!sp) - sp = (unsigned long *)task->thread.sp; + sp = stack_pointer(task); stack = sp; printk("\nStack: "); diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S index 5accf51053d..41c159cd872 100644 --- a/arch/xtensa/kernel/vmlinux.lds.S +++ b/arch/xtensa/kernel/vmlinux.lds.S @@ -87,7 +87,7 @@ SECTIONS { /* The HEAD_TEXT section must be the first section! 
*/ HEAD_TEXT - *(.literal .text) + TEXT_TEXT VMLINUX_SYMBOL(__sched_text_start) = .; *(.sched.literal .sched.text) VMLINUX_SYMBOL(__sched_text_end) = .; @@ -139,8 +139,6 @@ SECTIONS __init_begin = .; .init.text : { _sinittext = .; - *(.init.literal) *(.cpuinit.literal) - *(.devinit.literal) *(.meminit.literal) INIT_TEXT _einittext = .; } diff --git a/arch/xtensa/platforms/s6105/setup.c b/arch/xtensa/platforms/s6105/setup.c index ae041d5027a..855ddeadc43 100644 --- a/arch/xtensa/platforms/s6105/setup.c +++ b/arch/xtensa/platforms/s6105/setup.c @@ -10,6 +10,8 @@ #include #include +#include + #include void platform_halt(void) @@ -47,6 +49,7 @@ void __init platform_setup(char **cmdline) void __init platform_init(bp_tag_t *first) { + s6_gpio_init(); gpio_request(GPIO_LED1_NGREEN, "led1_green"); gpio_request(GPIO_LED1_RED, "led1_red"); gpio_direction_output(GPIO_LED1_NGREEN, 1); diff --git a/arch/xtensa/variants/s6000/Makefile b/arch/xtensa/variants/s6000/Makefile index 03b3975468b..d83f3805130 100644 --- a/arch/xtensa/variants/s6000/Makefile +++ b/arch/xtensa/variants/s6000/Makefile @@ -1,3 +1,4 @@ # s6000 Makefile obj-y += irq.o gpio.o +obj-$(CONFIG_XTENSA_CALIBRATE_CCOUNT) += delay.o diff --git a/arch/xtensa/variants/s6000/delay.c b/arch/xtensa/variants/s6000/delay.c new file mode 100644 index 00000000000..54b2b573f16 --- /dev/null +++ b/arch/xtensa/variants/s6000/delay.c @@ -0,0 +1,27 @@ +#include +#include +#include +#include + +#define LOOPS 10 +void platform_calibrate_ccount(void) +{ + u32 uninitialized_var(a); + u32 uninitialized_var(u); + u32 b; + u32 tstamp = S6_REG_GREG1 + S6_GREG1_GLOBAL_TIMER; + int i = LOOPS+1; + do { + u32 t = u; + asm volatile( + "1: l32i %0, %2, 0 ;" + " beq %0, %1, 1b ;" + : "=&a"(u) : "a"(t), "a"(tstamp)); + b = xtensa_get_ccount(); + if (i == LOOPS) + a = b; + } while (--i >= 0); + b -= a; + nsec_per_ccount = (LOOPS * 10000) / b; + ccount_per_jiffy = b * (100000UL / (LOOPS * HZ)); +} diff --git a/arch/xtensa/variants/s6000/gpio.c b/arch/xtensa/variants/s6000/gpio.c index 33a8d952934..79317fdcf14 100644 --- a/arch/xtensa/variants/s6000/gpio.c +++ b/arch/xtensa/variants/s6000/gpio.c @@ -64,8 +64,7 @@ static struct gpio_chip gpiochip = { .exported = 0, /* no exporting to userspace */ }; -static int gpio_init(void) +int s6_gpio_init(void) { return gpiochip_add(&gpiochip); } -device_initcall(gpio_init); diff --git a/arch/xtensa/variants/s6000/include/variant/gpio.h b/arch/xtensa/variants/s6000/include/variant/gpio.h new file mode 100644 index 00000000000..8327f62167e --- /dev/null +++ b/arch/xtensa/variants/s6000/include/variant/gpio.h @@ -0,0 +1,6 @@ +#ifndef _XTENSA_VARIANT_S6000_GPIO_H +#define _XTENSA_VARIANT_S6000_GPIO_H + +extern int s6_gpio_init(void); + +#endif /* _XTENSA_VARIANT_S6000_GPIO_H */ diff --git a/block/blk-core.c b/block/blk-core.c index 2998fe3a237..c89883be873 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -1768,10 +1768,10 @@ static int __end_that_request_first(struct request *req, int error, } else { int idx = bio->bi_idx + next_idx; - if (unlikely(bio->bi_idx >= bio->bi_vcnt)) { + if (unlikely(idx >= bio->bi_vcnt)) { blk_dump_rq_flags(req, "__end_that"); printk(KERN_ERR "%s: bio idx %d >= vcnt %d\n", - __func__, bio->bi_idx, bio->bi_vcnt); + __func__, idx, bio->bi_vcnt); break; } diff --git a/block/bsg.c b/block/bsg.c index 206060e795d..dd81be455e0 100644 --- a/block/bsg.c +++ b/block/bsg.c @@ -315,6 +315,7 @@ out: blk_put_request(rq); if (next_rq) { blk_rq_unmap_user(next_rq->bio); + next_rq->bio = NULL; blk_put_request(next_rq); 
} return ERR_PTR(ret); @@ -448,6 +449,7 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr, hdr->dout_resid = rq->data_len; hdr->din_resid = rq->next_rq->data_len; blk_rq_unmap_user(bidi_bio); + rq->next_rq->bio = NULL; blk_put_request(rq->next_rq); } else if (rq_data_dir(rq) == READ) hdr->din_resid = rq->data_len; @@ -466,6 +468,7 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr, blk_rq_unmap_user(bio); if (rq->cmd != rq->__cmd) kfree(rq->cmd); + rq->bio = NULL; blk_put_request(rq); return ret; diff --git a/crypto/ahash.c b/crypto/ahash.c index b2d1ee32cfe..f3476374f76 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c @@ -82,10 +82,11 @@ int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err) if (err) return err; - walk->offset = 0; - - if (nbytes) + if (nbytes) { + walk->offset = 0; + walk->pg++; return hash_walk_next(walk); + } if (!walk->total) return 0; diff --git a/crypto/api.c b/crypto/api.c index 314dab96840..fd2545decb2 100644 --- a/crypto/api.c +++ b/crypto/api.c @@ -221,7 +221,8 @@ struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, u32 mask) request_module(name); - if (!((type ^ CRYPTO_ALG_NEED_FALLBACK) & mask) && + if (!((type ^ CRYPTO_ALG_NEED_FALLBACK) & mask & + CRYPTO_ALG_NEED_FALLBACK) && snprintf(tmp, sizeof(tmp), "%s-all", name) < sizeof(tmp)) request_module(tmp); diff --git a/crypto/eseqiv.c b/crypto/eseqiv.c index 2a342c8e52b..3ca3b669d5d 100644 --- a/crypto/eseqiv.c +++ b/crypto/eseqiv.c @@ -153,7 +153,8 @@ static int eseqiv_givencrypt(struct skcipher_givcrypt_request *req) if (err) goto out; - eseqiv_complete2(req); + if (giv != req->giv) + eseqiv_complete2(req); out: return err; diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile index 17e50824a6f..72ac28da14e 100644 --- a/drivers/acpi/acpica/Makefile +++ b/drivers/acpi/acpica/Makefile @@ -5,40 +5,43 @@ ccflags-y := -Os ccflags-$(CONFIG_ACPI_DEBUG) += -DACPI_DEBUG_OUTPUT -obj-y := dsfield.o dsmthdat.o dsopcode.o dswexec.o dswscope.o \ +# use acpi.o to put all files here into acpi.o modparam namespace +obj-y += acpi.o + +acpi-y := dsfield.o dsmthdat.o dsopcode.o dswexec.o dswscope.o \ dsmethod.o dsobject.o dsutils.o dswload.o dswstate.o \ dsinit.o -obj-y += evevent.o evregion.o evsci.o evxfevnt.o \ +acpi-y += evevent.o evregion.o evsci.o evxfevnt.o \ evmisc.o evrgnini.o evxface.o evxfregn.o \ evgpe.o evgpeblk.o -obj-y += exconfig.o exfield.o exnames.o exoparg6.o exresolv.o exstorob.o\ +acpi-y += exconfig.o exfield.o exnames.o exoparg6.o exresolv.o exstorob.o\ exconvrt.o exfldio.o exoparg1.o exprep.o exresop.o exsystem.o\ excreate.o exmisc.o exoparg2.o exregion.o exstore.o exutils.o \ exdump.o exmutex.o exoparg3.o exresnte.o exstoren.o -obj-y += hwacpi.o hwgpe.o hwregs.o hwsleep.o hwxface.o hwvalid.o +acpi-y += hwacpi.o hwgpe.o hwregs.o hwsleep.o hwxface.o hwvalid.o -obj-$(ACPI_FUTURE_USAGE) += hwtimer.o +acpi-$(ACPI_FUTURE_USAGE) += hwtimer.o -obj-y += nsaccess.o nsload.o nssearch.o nsxfeval.o \ +acpi-y += nsaccess.o nsload.o nssearch.o nsxfeval.o \ nsalloc.o nseval.o nsnames.o nsutils.o nsxfname.o \ nsdump.o nsinit.o nsobject.o nswalk.o nsxfobj.o \ nsparse.o nspredef.o -obj-$(ACPI_FUTURE_USAGE) += nsdumpdv.o +acpi-$(ACPI_FUTURE_USAGE) += nsdumpdv.o -obj-y += psargs.o psparse.o psloop.o pstree.o pswalk.o \ +acpi-y += psargs.o psparse.o psloop.o pstree.o pswalk.o \ psopcode.o psscope.o psutils.o psxface.o -obj-y += rsaddr.o rscreate.o rsinfo.o rsio.o rslist.o rsmisc.o rsxface.o \ +acpi-y += rsaddr.o rscreate.o 
rsinfo.o rsio.o rslist.o rsmisc.o rsxface.o \ rscalc.o rsirq.o rsmemory.o rsutils.o -obj-$(ACPI_FUTURE_USAGE) += rsdump.o +acpi-$(ACPI_FUTURE_USAGE) += rsdump.o -obj-y += tbxface.o tbinstal.o tbutils.o tbfind.o tbfadt.o tbxfroot.o +acpi-y += tbxface.o tbinstal.o tbutils.o tbfind.o tbfadt.o tbxfroot.o -obj-y += utalloc.o utdebug.o uteval.o utinit.o utmisc.o utxface.o \ +acpi-y += utalloc.o utdebug.o uteval.o utinit.o utmisc.o utxface.o \ utcopy.o utdelete.o utglobal.o utmath.o utobject.o \ utstate.o utmutex.o utobject.o utresrc.o utlock.o diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h index 772ee5c4ccc..2ec394a328e 100644 --- a/drivers/acpi/acpica/aclocal.h +++ b/drivers/acpi/acpica/aclocal.h @@ -787,7 +787,12 @@ struct acpi_bit_register_info { /* For control registers, both ignored and reserved bits must be preserved */ -#define ACPI_PM1_CONTROL_IGNORED_BITS 0x0201 /* Bits 9, 0(SCI_EN) */ +/* + * The ACPI spec says to ignore PM1_CTL.SCI_EN (bit 0) + * but we need to be able to write ACPI_BITREG_SCI_ENABLE directly + * as a BIOS workaround on some machines. + */ +#define ACPI_PM1_CONTROL_IGNORED_BITS 0x0200 /* Bits 9 */ #define ACPI_PM1_CONTROL_RESERVED_BITS 0xC1F8 /* Bits 14-15, 3-8 */ #define ACPI_PM1_CONTROL_PRESERVED_BITS \ (ACPI_PM1_CONTROL_IGNORED_BITS | ACPI_PM1_CONTROL_RESERVED_BITS) diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index e8f7b64e92d..ae862f1798d 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c @@ -312,7 +312,7 @@ int acpi_bus_set_power(acpi_handle handle, int state) end: if (result) printk(KERN_WARNING PREFIX - "Transitioning device [%s] to D%d\n", + "Device [%s] failed to transition to D%d\n", device->pnp.bus_id, state); else { device->power.state = state; diff --git a/drivers/acpi/pci_bind.c b/drivers/acpi/pci_bind.c index 95650f83ce2..bc46de3d967 100644 --- a/drivers/acpi/pci_bind.c +++ b/drivers/acpi/pci_bind.c @@ -116,9 +116,6 @@ int acpi_pci_bind(struct acpi_device *device) struct acpi_pci_data *pdata; struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; acpi_handle handle; - struct pci_dev *dev; - struct pci_bus *bus; - if (!device || !device->parent) return -EINVAL; @@ -176,20 +173,9 @@ int acpi_pci_bind(struct acpi_device *device) * Locate matching device in PCI namespace. If it doesn't exist * this typically means that the device isn't currently inserted * (e.g. docking station, port replicator, etc.). - * We cannot simply search the global pci device list, since - * PCI devices are added to the global pci list when the root - * bridge start ops are run, which may not have happened yet. 
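/*
 * [Editor's note, not part of the patch] The acpi_pci_bind() hunk just
 * below replaces the open-coded walk of bus->devices with pci_get_slot().
 * Unlike the list walk, pci_get_slot() returns a *referenced* pci_dev, so
 * every successful lookup must be balanced with pci_dev_put() -- which is
 * exactly what the added error-path and acpi_pci_unbind() lines do. A
 * minimal illustrative sketch of that get/put pattern (the function and
 * parameter names here are hypothetical, not taken from the patch):
 */
#include <linux/pci.h>

static struct pci_dev *lookup_func(struct pci_bus *bus,
				   unsigned int slot, unsigned int func)
{
	/* takes a reference on the returned device (or returns NULL) */
	return pci_get_slot(bus, PCI_DEVFN(slot, func));
}

static void release_func(struct pci_dev *pdev)
{
	/* pci_dev_put(NULL) is a no-op, so an unconditional call is fine */
	pci_dev_put(pdev);
}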
*/ - bus = pci_find_bus(data->id.segment, data->id.bus); - if (bus) { - list_for_each_entry(dev, &bus->devices, bus_list) { - if (dev->devfn == PCI_DEVFN(data->id.device, - data->id.function)) { - data->dev = dev; - break; - } - } - } + data->dev = pci_get_slot(pdata->bus, + PCI_DEVFN(data->id.device, data->id.function)); if (!data->dev) { ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device %04x:%02x:%02x.%d not present in PCI namespace\n", @@ -259,9 +245,10 @@ int acpi_pci_bind(struct acpi_device *device) end: kfree(buffer.pointer); - if (result) + if (result) { + pci_dev_put(data->dev); kfree(data); - + } return result; } @@ -303,6 +290,7 @@ static int acpi_pci_unbind(struct acpi_device *device) if (data->dev->subordinate) { acpi_pci_irq_del_prt(data->id.segment, data->bus->number); } + pci_dev_put(data->dev); kfree(data); end: diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c index 45ad3288c5f..23f0fb84f1c 100644 --- a/drivers/acpi/processor_core.c +++ b/drivers/acpi/processor_core.c @@ -844,7 +844,7 @@ static int acpi_processor_add(struct acpi_device *device) if (!pr) return -ENOMEM; - if (!alloc_cpumask_var(&pr->throttling.shared_cpu_map, GFP_KERNEL)) { + if (!zalloc_cpumask_var(&pr->throttling.shared_cpu_map, GFP_KERNEL)) { kfree(pr); return -ENOMEM; } diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index f7ca8c55956..10a2d913635 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -148,6 +148,9 @@ static void acpi_timer_check_state(int state, struct acpi_processor *pr, if (cpu_has(&cpu_data(pr->id), X86_FEATURE_ARAT)) return; + if (boot_cpu_has(X86_FEATURE_AMDC1E)) + type = ACPI_STATE_C1; + /* * Check, if one of the previous states already marked the lapic * unstable @@ -202,21 +205,44 @@ static void acpi_state_timer_broadcast(struct acpi_processor *pr, * Suspend / resume control */ static int acpi_idle_suspend; +static u32 saved_bm_rld; + +static void acpi_idle_bm_rld_save(void) +{ + acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &saved_bm_rld); +} +static void acpi_idle_bm_rld_restore(void) +{ + u32 resumed_bm_rld; + + acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &resumed_bm_rld); + + if (resumed_bm_rld != saved_bm_rld) + acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, saved_bm_rld); +} int acpi_processor_suspend(struct acpi_device * device, pm_message_t state) { + if (acpi_idle_suspend == 1) + return 0; + + acpi_idle_bm_rld_save(); acpi_idle_suspend = 1; return 0; } int acpi_processor_resume(struct acpi_device * device) { + if (acpi_idle_suspend == 0) + return 0; + + acpi_idle_bm_rld_restore(); acpi_idle_suspend = 0; return 0; } #if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86) -static int tsc_halts_in_c(int state) +static void tsc_check_state(int state) { switch (boot_cpu_data.x86_vendor) { case X86_VENDOR_AMD: @@ -226,13 +252,17 @@ static int tsc_halts_in_c(int state) * C/P/S0/S1 states when this bit is set. 
*/ if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC)) - return 0; + return; /*FALL THROUGH*/ default: - return state > ACPI_STATE_C1; + /* TSC could halt in idle, so notify users */ + if (state > ACPI_STATE_C1) + mark_tsc_unstable("TSC halts in idle"); } } +#else +static void tsc_check_state(int state) { return; } #endif static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr) @@ -578,17 +608,13 @@ static int acpi_processor_power_verify(struct acpi_processor *pr) pr->power.timer_broadcast_on_state = INT_MAX; - for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) { + for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) { struct acpi_processor_cx *cx = &pr->power.states[i]; -#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86) - /* TSC could halt in idle, so notify users */ - if (tsc_halts_in_c(cx->type)) - mark_tsc_unstable("TSC halts in idle");; -#endif switch (cx->type) { case ACPI_STATE_C1: cx->valid = 1; + acpi_timer_check_state(i, pr, cx); break; case ACPI_STATE_C2: @@ -603,6 +629,8 @@ static int acpi_processor_power_verify(struct acpi_processor *pr) acpi_timer_check_state(i, pr, cx); break; } + if (cx->valid) + tsc_check_state(cx->type); if (cx->valid) working++; @@ -806,11 +834,12 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev, /* Do not access any ACPI IO ports in suspend path */ if (acpi_idle_suspend) { - acpi_safe_halt(); local_irq_enable(); + cpu_relax(); return 0; } + acpi_state_timer_broadcast(pr, cx, 1); kt1 = ktime_get_real(); acpi_idle_do_entry(cx); kt2 = ktime_get_real(); @@ -818,6 +847,7 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev, local_irq_enable(); cx->usage++; + acpi_state_timer_broadcast(pr, cx, 0); return idle_time; } diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c index cafb41000f6..60e543d3234 100644 --- a/drivers/acpi/processor_perflib.c +++ b/drivers/acpi/processor_perflib.c @@ -309,9 +309,15 @@ static int acpi_processor_get_performance_states(struct acpi_processor *pr) (u32) px->bus_master_latency, (u32) px->control, (u32) px->status)); - if (!px->core_frequency) { - printk(KERN_ERR PREFIX - "Invalid _PSS data: freq is zero\n"); + /* + * Check that ACPI's u64 MHz will be valid as u32 KHz in cpufreq + */ + if (!px->core_frequency || + ((u32)(px->core_frequency * 1000) != + (px->core_frequency * 1000))) { + printk(KERN_ERR FW_BUG PREFIX + "Invalid BIOS _PSS frequency: 0x%llx MHz\n", + px->core_frequency); result = -EFAULT; kfree(pr->performance->states); goto end; diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c index d0d1f4d5043..227543789ba 100644 --- a/drivers/acpi/processor_throttling.c +++ b/drivers/acpi/processor_throttling.c @@ -45,6 +45,14 @@ #define _COMPONENT ACPI_PROCESSOR_COMPONENT ACPI_MODULE_NAME("processor_throttling"); +/* ignore_tpc: + * 0 -> acpi processor driver doesn't ignore _TPC values + * 1 -> acpi processor driver ignores _TPC values + */ +static int ignore_tpc; +module_param(ignore_tpc, int, 0644); +MODULE_PARM_DESC(ignore_tpc, "Disable broken BIOS _TPC throttling support"); + struct throttling_tstate { unsigned int cpu; /* cpu nr */ int target_state; /* target T-state */ @@ -283,6 +291,10 @@ static int acpi_processor_get_platform_limit(struct acpi_processor *pr) if (!pr) return -EINVAL; + + if (ignore_tpc) + goto end; + status = acpi_evaluate_integer(pr->handle, "_TPC", NULL, &tpc); if (ACPI_FAILURE(status)) { if (status != AE_NOT_FOUND) { @@ -290,6 +302,8 @@ static int acpi_processor_get_platform_limit(struct 
acpi_processor *pr) } return -ENODEV; } + +end: pr->throttling_platform_limit = (int)tpc; return 0; } @@ -302,6 +316,9 @@ int acpi_processor_tstate_has_changed(struct acpi_processor *pr) struct acpi_processor_limit *limit; int target_state; + if (ignore_tpc) + return 0; + result = acpi_processor_get_platform_limit(pr); if (result) { /* Throttling Limit is unsupported */ @@ -821,6 +838,14 @@ static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr) ret = acpi_read_throttling_status(pr, &value); if (ret >= 0) { state = acpi_get_throttling_state(pr, value); + if (state == -1) { + ACPI_WARNING((AE_INFO, + "Invalid throttling state, reset")); + state = 0; + ret = acpi_processor_set_throttling(pr, state); + if (ret) + return ret; + } pr->throttling.state = state; } diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c index d7ff61c0d57..1bdfb37377e 100644 --- a/drivers/acpi/video.c +++ b/drivers/acpi/video.c @@ -538,6 +538,57 @@ acpi_video_device_lcd_set_level(struct acpi_video_device *device, int level) return -EINVAL; } +/* + * For some buggy _BQC methods, we need to add a constant value to + * the _BQC return value to get the actual current brightness level + */ + +static int bqc_offset_aml_bug_workaround; +static int __init video_set_bqc_offset(const struct dmi_system_id *d) +{ + bqc_offset_aml_bug_workaround = 9; + return 0; +} + +static struct dmi_system_id video_dmi_table[] __initdata = { + /* + * Broken _BQC workaround http://bugzilla.kernel.org/show_bug.cgi?id=13121 + */ + { + .callback = video_set_bqc_offset, + .ident = "Acer Aspire 5720", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5720"), + }, + }, + { + .callback = video_set_bqc_offset, + .ident = "Acer Aspire 5710Z", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5710Z"), + }, + }, + { + .callback = video_set_bqc_offset, + .ident = "eMachines E510", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "EMACHINES"), + DMI_MATCH(DMI_PRODUCT_NAME, "eMachines E510"), + }, + }, + { + .callback = video_set_bqc_offset, + .ident = "Acer Aspire 5315", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5315"), + }, + }, + {} +}; + static int acpi_video_device_lcd_get_level_current(struct acpi_video_device *device, unsigned long long *level) @@ -557,6 +608,7 @@ acpi_video_device_lcd_get_level_current(struct acpi_video_device *device, *level = device->brightness->levels[*level + 2]; } + *level += bqc_offset_aml_bug_workaround; device->brightness->curr = *level; return 0; } else { @@ -2290,13 +2342,15 @@ EXPORT_SYMBOL(acpi_video_register); static int __init acpi_video_init(void) { + dmi_check_system(video_dmi_table); + if (intel_opregion_present()) return 0; return acpi_video_register(); } -void __exit acpi_video_exit(void) +void acpi_video_exit(void) { acpi_bus_unregister_driver(&acpi_video_bus); diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 08186ecbaf8..6b91c26a463 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -220,6 +220,7 @@ enum { AHCI_HFLAG_NO_HOTPLUG = (1 << 7), /* ignore PxSERR.DIAG.N */ AHCI_HFLAG_SECT255 = (1 << 8), /* max 255 sectors */ AHCI_HFLAG_YES_NCQ = (1 << 9), /* force NCQ cap on */ + AHCI_HFLAG_NO_SUSPEND = (1 << 10), /* don't suspend */ /* ap->flags bits */ @@ -2316,9 +2317,17 @@ static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg) static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg) { struct ata_host *host = 
dev_get_drvdata(&pdev->dev); + struct ahci_host_priv *hpriv = host->private_data; void __iomem *mmio = host->iomap[AHCI_PCI_BAR]; u32 ctl; + if (mesg.event & PM_EVENT_SUSPEND && + hpriv->flags & AHCI_HFLAG_NO_SUSPEND) { + dev_printk(KERN_ERR, &pdev->dev, + "BIOS update required for suspend/resume\n"); + return -EIO; + } + if (mesg.event & PM_EVENT_SLEEP) { /* AHCI spec rev1.1 section 8.3.3: * Software must disable interrupts prior to requesting a @@ -2610,6 +2619,63 @@ static bool ahci_broken_system_poweroff(struct pci_dev *pdev) return false; } +static bool ahci_broken_suspend(struct pci_dev *pdev) +{ + static const struct dmi_system_id sysids[] = { + /* + * On HP dv[4-6] and HDX18 with earlier BIOSen, link + * to the harddisk doesn't become online after + * resuming from STR. Warn and fail suspend. + */ + { + .ident = "dv4", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_NAME, + "HP Pavilion dv4 Notebook PC"), + }, + .driver_data = "F.30", /* cutoff BIOS version */ + }, + { + .ident = "dv5", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_NAME, + "HP Pavilion dv5 Notebook PC"), + }, + .driver_data = "F.16", /* cutoff BIOS version */ + }, + { + .ident = "dv6", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_NAME, + "HP Pavilion dv6 Notebook PC"), + }, + .driver_data = "F.21", /* cutoff BIOS version */ + }, + { + .ident = "HDX18", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_NAME, + "HP HDX18 Notebook PC"), + }, + .driver_data = "F.23", /* cutoff BIOS version */ + }, + { } /* terminate list */ + }; + const struct dmi_system_id *dmi = dmi_first_match(sysids); + const char *ver; + + if (!dmi || pdev->bus->number || pdev->devfn != PCI_DEVFN(0x1f, 2)) + return false; + + ver = dmi_get_system_info(DMI_BIOS_VERSION); + + return !ver || strcmp(ver, dmi->driver_data) < 0; +} + static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { static int printed_version; @@ -2715,6 +2781,12 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) "quirky BIOS, skipping spindown on poweroff\n"); } + if (ahci_broken_suspend(pdev)) { + hpriv->flags |= AHCI_HFLAG_NO_SUSPEND; + dev_printk(KERN_WARNING, &pdev->dev, + "BIOS update required for suspend/resume\n"); + } + /* CAP.NP sometimes indicate the index of the last enabled * port, at other times, that of the last possible port, so * determining the maximum port number requires looking at diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c index 942d14ac879..1aeb7082b0c 100644 --- a/drivers/ata/ata_piix.c +++ b/drivers/ata/ata_piix.c @@ -72,6 +72,7 @@ * ICH2 spec c #20 - IDE PRD must not cross a 64K boundary * and must be dword aligned * ICH2 spec c #24 - UDMA mode 4,5 t85/86 should be 6ns not 3.3 + * ICH7 errata #16 - MWDMA1 timings are incorrect * * Should have been BIOS fixed: * 450NX: errata #19 - DMA hangs on old 450NX @@ -94,7 +95,7 @@ #include #define DRV_NAME "ata_piix" -#define DRV_VERSION "2.12" +#define DRV_VERSION "2.13" enum { PIIX_IOCFG = 0x54, /* IDE I/O configuration register */ @@ -136,6 +137,7 @@ enum piix_controller_ids { ich_pata_33, /* ICH up to UDMA 33 only */ ich_pata_66, /* ICH up to 66 Mhz */ ich_pata_100, /* ICH up to UDMA 100 */ + ich_pata_100_nomwdma1, /* ICH up to UDMA 100 but with no MWDMA1*/ ich5_sata, ich6_sata, ich6m_sata, @@ -216,8 +218,8 @@ static const struct pci_device_id piix_pci_tbl[] = { /* ICH6 (and 6) 
(i915) UDMA 100 */ { 0x8086, 0x266F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 }, /* ICH7/7-R (i945, i975) UDMA 100*/ - { 0x8086, 0x27DF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 }, - { 0x8086, 0x269E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 }, + { 0x8086, 0x27DF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100_nomwdma1 }, + { 0x8086, 0x269E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100_nomwdma1 }, /* ICH8 Mobile PATA Controller */ { 0x8086, 0x2850, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 }, @@ -487,6 +489,15 @@ static struct ata_port_info piix_port_info[] = { .port_ops = &ich_pata_ops, }, + [ich_pata_100_nomwdma1] = + { + .flags = PIIX_PATA_FLAGS | PIIX_FLAG_CHECKINTR, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2_ONLY, + .udma_mask = ATA_UDMA5, + .port_ops = &ich_pata_ops, + }, + [ich5_sata] = { .flags = PIIX_SATA_FLAGS, @@ -594,6 +605,7 @@ static const struct ich_laptop ich_laptop[] = { { 0x24CA, 0x1025, 0x003d }, /* ICH4 on ACER TM290 */ { 0x266F, 0x1025, 0x0066 }, /* ICH6 on ACER Aspire 1694WLMi */ { 0x2653, 0x1043, 0x82D8 }, /* ICH6M on Asus Eee 701 */ + { 0x27df, 0x104d, 0x900e }, /* ICH7 on Sony TZ-90 */ /* end marker */ { 0, } }; @@ -1443,6 +1455,15 @@ static bool piix_broken_system_poweroff(struct pci_dev *pdev) /* PCI slot number of the controller */ .driver_data = (void *)0x1FUL, }, + { + .ident = "HP Compaq nc6000", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nc6000"), + }, + /* PCI slot number of the controller */ + .driver_data = (void *)0x1FUL, + }, { } /* terminate list */ }; diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 17c5d48a75d..c9242301cfa 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -4091,7 +4091,9 @@ int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class, /* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */ if (ata_class_enabled(new_class) && - new_class != ATA_DEV_ATA && new_class != ATA_DEV_ATAPI) { + new_class != ATA_DEV_ATA && + new_class != ATA_DEV_ATAPI && + new_class != ATA_DEV_SEMB) { ata_dev_printk(dev, KERN_INFO, "class mismatch %u != %u\n", dev->class, new_class); rc = -ENODEV; diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index 01831312c36..94919ad03df 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c @@ -2783,6 +2783,12 @@ static int ata_eh_revalidate_and_attach(struct ata_link *link, } else if (dev->class == ATA_DEV_UNKNOWN && ehc->tries[dev->devno] && ata_class_enabled(ehc->classes[dev->devno])) { + /* Temporarily set dev->class, it will be + * permanently set once all configurations are + * complete. This is necessary because new + * device configuration is done in two + * separate loops. + */ dev->class = ehc->classes[dev->devno]; if (dev->class == ATA_DEV_PMP) @@ -2790,6 +2796,11 @@ static int ata_eh_revalidate_and_attach(struct ata_link *link, else rc = ata_dev_read_id(dev, &dev->class, readid_flags, dev->id); + + /* read_id might have changed class, store and reset */ + ehc->classes[dev->devno] = dev->class; + dev->class = ATA_DEV_UNKNOWN; + switch (rc) { case 0: /* clear error info accumulated during probe */ @@ -2799,13 +2810,11 @@ static int ata_eh_revalidate_and_attach(struct ata_link *link, case -ENOENT: /* IDENTIFY was issued to non-existent * device. No need to reset. Just - * thaw and kill the device. + * thaw and ignore the device. 
*/ ata_eh_thaw_port(ap); - dev->class = ATA_DEV_UNKNOWN; break; default: - dev->class = ATA_DEV_UNKNOWN; goto err; } } @@ -2826,11 +2835,15 @@ static int ata_eh_revalidate_and_attach(struct ata_link *link, dev->class == ATA_DEV_PMP) continue; + dev->class = ehc->classes[dev->devno]; + ehc->i.flags |= ATA_EHI_PRINTINFO; rc = ata_dev_configure(dev); ehc->i.flags &= ~ATA_EHI_PRINTINFO; - if (rc) + if (rc) { + dev->class = ATA_DEV_UNKNOWN; goto err; + } spin_lock_irqsave(ap->lock, flags); ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG; @@ -3494,6 +3507,8 @@ static void ata_eh_handle_port_suspend(struct ata_port *ap) */ static void ata_eh_handle_port_resume(struct ata_port *ap) { + struct ata_link *link; + struct ata_device *dev; unsigned long flags; int rc = 0; @@ -3508,6 +3523,17 @@ static void ata_eh_handle_port_resume(struct ata_port *ap) WARN_ON(!(ap->pflags & ATA_PFLAG_SUSPENDED)); + /* + * Error timestamps are in jiffies which doesn't run while + * suspended and PHY events during resume isn't too uncommon. + * When the two are combined, it can lead to unnecessary speed + * downs if the machine is suspended and resumed repeatedly. + * Clear error history. + */ + ata_for_each_link(link, ap, HOST_FIRST) + ata_for_each_dev(dev, link, ALL) + ata_ering_clear(&dev->ering); + ata_acpi_set_state(ap, PMSG_ON); if (ap->ops->port_resume) diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 2733b0c90b7..342316064e9 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -313,7 +313,7 @@ ata_scsi_em_message_show(struct device *dev, struct device_attribute *attr, return ap->ops->em_show(ap, buf); return -EINVAL; } -DEVICE_ATTR(em_message, S_IRUGO | S_IWUGO, +DEVICE_ATTR(em_message, S_IRUGO | S_IWUSR, ata_scsi_em_message_show, ata_scsi_em_message_store); EXPORT_SYMBOL_GPL(dev_attr_em_message); @@ -366,7 +366,7 @@ ata_scsi_activity_store(struct device *dev, struct device_attribute *attr, } return -EINVAL; } -DEVICE_ATTR(sw_activity, S_IWUGO | S_IRUGO, ata_scsi_activity_show, +DEVICE_ATTR(sw_activity, S_IWUSR | S_IRUGO, ata_scsi_activity_show, ata_scsi_activity_store); EXPORT_SYMBOL_GPL(dev_attr_sw_activity); @@ -2142,13 +2142,14 @@ static unsigned int ata_scsiop_inq_89(struct ata_scsi_args *args, u8 *rbuf) static unsigned int ata_scsiop_inq_b1(struct ata_scsi_args *args, u8 *rbuf) { + int form_factor = ata_id_form_factor(args->id); + int media_rotation_rate = ata_id_rotation_rate(args->id); + rbuf[1] = 0xb1; rbuf[3] = 0x3c; - if (ata_id_major_version(args->id) > 7) { - rbuf[4] = args->id[217] >> 8; - rbuf[5] = args->id[217]; - rbuf[7] = args->id[168] & 0xf; - } + rbuf[4] = media_rotation_rate >> 8; + rbuf[5] = media_rotation_rate; + rbuf[7] = form_factor; return 0; } @@ -2376,7 +2377,23 @@ saving_not_supp: */ static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf) { - u64 last_lba = args->dev->n_sectors - 1; /* LBA of the last block */ + struct ata_device *dev = args->dev; + u64 last_lba = dev->n_sectors - 1; /* LBA of the last block */ + u8 log_per_phys = 0; + u16 lowest_aligned = 0; + u16 word_106 = dev->id[106]; + u16 word_209 = dev->id[209]; + + if ((word_106 & 0xc000) == 0x4000) { + /* Number and offset of logical sectors per physical sector */ + if (word_106 & (1 << 13)) + log_per_phys = word_106 & 0xf; + if ((word_209 & 0xc000) == 0x4000) { + u16 first = dev->id[209] & 0x3fff; + if (first > 0) + lowest_aligned = (1 << log_per_phys) - first; + } + } VPRINTK("ENTER\n"); @@ -2407,6 +2424,11 @@ static unsigned int ata_scsiop_read_cap(struct 
ata_scsi_args *args, u8 *rbuf) /* sector size */ rbuf[10] = ATA_SECT_SIZE >> 8; rbuf[11] = ATA_SECT_SIZE & 0xff; + + rbuf[12] = 0; + rbuf[13] = log_per_phys; + rbuf[14] = (lowest_aligned >> 8) & 0x3f; + rbuf[15] = lowest_aligned; } return 0; diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c index 751b7ea4816..fc9c5d6d7d8 100644 --- a/drivers/ata/pata_ali.c +++ b/drivers/ata/pata_ali.c @@ -497,14 +497,16 @@ static int ali_init_one(struct pci_dev *pdev, const struct pci_device_id *id) }; /* Revision 0x20 added DMA */ static const struct ata_port_info info_20 = { - .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48, + .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48 | + ATA_FLAG_IGN_SIMPLEX, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, .port_ops = &ali_20_port_ops }; /* Revision 0x20 with support logic added UDMA */ static const struct ata_port_info info_20_udma = { - .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48, + .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48 | + ATA_FLAG_IGN_SIMPLEX, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA2, @@ -512,7 +514,8 @@ static int ali_init_one(struct pci_dev *pdev, const struct pci_device_id *id) }; /* Revision 0xC2 adds UDMA66 */ static const struct ata_port_info info_c2 = { - .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48, + .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48 | + ATA_FLAG_IGN_SIMPLEX, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA4, @@ -520,7 +523,8 @@ static int ali_init_one(struct pci_dev *pdev, const struct pci_device_id *id) }; /* Revision 0xC3 is UDMA66 for now */ static const struct ata_port_info info_c3 = { - .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48, + .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48 | + ATA_FLAG_IGN_SIMPLEX, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA4, @@ -528,7 +532,8 @@ static int ali_init_one(struct pci_dev *pdev, const struct pci_device_id *id) }; /* Revision 0xC4 is UDMA100 */ static const struct ata_port_info info_c4 = { - .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48, + .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48 | + ATA_FLAG_IGN_SIMPLEX, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA5, @@ -536,7 +541,7 @@ static int ali_init_one(struct pci_dev *pdev, const struct pci_device_id *id) }; /* Revision 0xC5 is UDMA133 with LBA48 DMA */ static const struct ata_port_info info_c5 = { - .flags = ATA_FLAG_SLAVE_POSS, + .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_IGN_SIMPLEX, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA6, diff --git a/drivers/ata/pata_efar.c b/drivers/ata/pata_efar.c index 2085e0a3a05..2a6412f5d11 100644 --- a/drivers/ata/pata_efar.c +++ b/drivers/ata/pata_efar.c @@ -22,7 +22,7 @@ #include #define DRV_NAME "pata_efar" -#define DRV_VERSION "0.4.4" +#define DRV_VERSION "0.4.5" /** * efar_pre_reset - Enable bits @@ -98,18 +98,17 @@ static void efar_set_piomode (struct ata_port *ap, struct ata_device *adev) { 2, 1 }, { 2, 3 }, }; - if (pio > 2) - control |= 1; /* TIME1 enable */ + if (pio > 1) + control |= 1; /* TIME */ if (ata_pio_need_iordy(adev)) /* PIO 3/4 require IORDY */ - control |= 2; /* IE enable */ - /* Intel specifies that the PPE functionality is for disk only */ + control |= 2; /* IE */ + /* Intel specifies that the prefetch/posting is for disk only */ if (adev->class == ATA_DEV_ATA) - control |= 4; /* PPE enable */ + control |= 4; /* PPE */ pci_read_config_word(dev, idetm_port, &idetm_data); - /* Enable PPE, IE and TIME as appropriate 
*/ - + /* Set PPE, IE, and TIME as appropriate */ if (adev->devno == 0) { idetm_data &= 0xCCF0; idetm_data |= control; @@ -129,7 +128,7 @@ static void efar_set_piomode (struct ata_port *ap, struct ata_device *adev) pci_write_config_byte(dev, 0x44, slave_data); } - idetm_data |= 0x4000; /* Ensure SITRE is enabled */ + idetm_data |= 0x4000; /* Ensure SITRE is set */ pci_write_config_word(dev, idetm_port, idetm_data); } diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c index f72c6c5b820..6932e56d179 100644 --- a/drivers/ata/pata_legacy.c +++ b/drivers/ata/pata_legacy.c @@ -48,6 +48,7 @@ * */ +#include #include #include #include @@ -1028,6 +1029,7 @@ static __init int legacy_init_one(struct legacy_probe *probe) &legacy_sht); if (ret) goto fail; + async_synchronize_full(); ld->platform_dev = pdev; /* Nothing found means we drop the port as its probably not there */ diff --git a/drivers/ata/pata_netcell.c b/drivers/ata/pata_netcell.c index bdb236957cb..f0d52f72f5b 100644 --- a/drivers/ata/pata_netcell.c +++ b/drivers/ata/pata_netcell.c @@ -20,13 +20,24 @@ /* No PIO or DMA methods needed for this device */ +static unsigned int netcell_read_id(struct ata_device *adev, + struct ata_taskfile *tf, u16 *id) +{ + unsigned int err_mask = ata_do_dev_read_id(adev, tf, id); + /* Firmware forgets to mark words 85-87 valid */ + if (err_mask == 0) + id[ATA_ID_CSF_DEFAULT] |= 0x4000; + return err_mask; +} + static struct scsi_host_template netcell_sht = { ATA_BMDMA_SHT(DRV_NAME), }; static struct ata_port_operations netcell_ops = { .inherits = &ata_bmdma_port_ops, - .cable_detect = ata_cable_80wire, + .cable_detect = ata_cable_80wire, + .read_id = netcell_read_id, }; diff --git a/drivers/ata/pata_pdc202xx_old.c b/drivers/ata/pata_pdc202xx_old.c index 5fedb3d4032..2f3c9bed63d 100644 --- a/drivers/ata/pata_pdc202xx_old.c +++ b/drivers/ata/pata_pdc202xx_old.c @@ -2,7 +2,7 @@ * pata_pdc202xx_old.c - Promise PDC202xx PATA for new ATA layer * (C) 2005 Red Hat Inc * Alan Cox - * (C) 2007 Bartlomiej Zolnierkiewicz + * (C) 2007,2009 Bartlomiej Zolnierkiewicz * * Based in part on linux/drivers/ide/pci/pdc202xx_old.c * @@ -158,7 +158,7 @@ static void pdc2026x_bmdma_start(struct ata_queued_cmd *qc) u32 len; /* Check we keep host level locking here */ - if (adev->dma_mode >= XFER_UDMA_2) + if (adev->dma_mode > XFER_UDMA_2) iowrite8(ioread8(clock) | sel66, clock); else iowrite8(ioread8(clock) & ~sel66, clock); @@ -212,7 +212,7 @@ static void pdc2026x_bmdma_stop(struct ata_queued_cmd *qc) iowrite8(ioread8(clock) & ~sel66, clock); } /* Flip back to 33Mhz for PIO */ - if (adev->dma_mode >= XFER_UDMA_2) + if (adev->dma_mode > XFER_UDMA_2) iowrite8(ioread8(clock) & ~sel66, clock); ata_bmdma_stop(qc); pdc202xx_set_piomode(ap, adev); diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c index c2e90e1fece..36b8629203b 100644 --- a/drivers/ata/sata_fsl.c +++ b/drivers/ata/sata_fsl.c @@ -205,6 +205,7 @@ struct cmdhdr_tbl_entry { * Description information bitdefs */ enum { + CMD_DESC_RES = (1 << 11), VENDOR_SPECIFIC_BIST = (1 << 10), CMD_DESC_SNOOP_ENABLE = (1 << 9), FPDMA_QUEUED_CMD = (1 << 8), @@ -332,13 +333,14 @@ static unsigned int sata_fsl_fill_sg(struct ata_queued_cmd *qc, void *cmd_desc, dma_addr_t sg_addr = sg_dma_address(sg); u32 sg_len = sg_dma_len(sg); - VPRINTK("SATA FSL : fill_sg, sg_addr = 0x%x, sg_len = %d\n", - sg_addr, sg_len); + VPRINTK("SATA FSL : fill_sg, sg_addr = 0x%llx, sg_len = %d\n", + (unsigned long long)sg_addr, sg_len); /* warn if each s/g element is not dword aligned */ if (sg_addr 
& 0x03) ata_port_printk(qc->ap, KERN_ERR, - "s/g addr unaligned : 0x%x\n", sg_addr); + "s/g addr unaligned : 0x%llx\n", + (unsigned long long)sg_addr); if (sg_len & 0x03) ata_port_printk(qc->ap, KERN_ERR, "s/g len unaligned : 0x%x\n", sg_len); @@ -387,7 +389,7 @@ static void sata_fsl_qc_prep(struct ata_queued_cmd *qc) void __iomem *hcr_base = host_priv->hcr_base; unsigned int tag = sata_fsl_tag(qc->tag, hcr_base); struct command_desc *cd; - u32 desc_info = CMD_DESC_SNOOP_ENABLE; + u32 desc_info = CMD_DESC_RES | CMD_DESC_SNOOP_ENABLE; u32 num_prde = 0; u32 ttl_dwords = 0; dma_addr_t cd_paddr; @@ -840,7 +842,7 @@ issue_srst: /* device reset/SRST is a control register update FIS, uses tag0 */ sata_fsl_setup_cmd_hdr_entry(pp, 0, - SRST_CMD | CMD_DESC_SNOOP_ENABLE, 0, 0, 5); + SRST_CMD | CMD_DESC_RES | CMD_DESC_SNOOP_ENABLE, 0, 0, 5); tf.ctl |= ATA_SRST; /* setup SRST bit in taskfile control reg */ ata_tf_to_fis(&tf, pmp, 0, cfis); @@ -886,7 +888,8 @@ issue_srst: * using ATA signature D2H register FIS to the host controller. */ - sata_fsl_setup_cmd_hdr_entry(pp, 0, CMD_DESC_SNOOP_ENABLE, 0, 0, 5); + sata_fsl_setup_cmd_hdr_entry(pp, 0, CMD_DESC_RES | CMD_DESC_SNOOP_ENABLE, + 0, 0, 5); tf.ctl &= ~ATA_SRST; /* 2nd H2D Ctl. register FIS */ ata_tf_to_fis(&tf, pmp, 0, cfis); diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c index 870dcfd8235..23714aefb82 100644 --- a/drivers/ata/sata_mv.c +++ b/drivers/ata/sata_mv.c @@ -293,6 +293,10 @@ enum { FISCFG_WAIT_DEV_ERR = (1 << 8), /* wait for host on DevErr */ FISCFG_SINGLE_SYNC = (1 << 16), /* SYNC on DMA activation */ + PHY_MODE9_GEN2 = 0x398, + PHY_MODE9_GEN1 = 0x39c, + PHYCFG_OFS = 0x3a0, /* only in 65n devices */ + MV5_PHY_MODE = 0x74, MV5_LTMODE = 0x30, MV5_PHY_CTL = 0x0C, @@ -609,6 +613,8 @@ static int mv_soc_reset_hc(struct mv_host_priv *hpriv, static void mv_soc_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio); static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio); +static void mv_soc_65n_phy_errata(struct mv_host_priv *hpriv, + void __iomem *mmio, unsigned int port); static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio); static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio, unsigned int port_no); @@ -807,6 +813,14 @@ static const struct mv_hw_ops mv_soc_ops = { .reset_bus = mv_soc_reset_bus, }; +static const struct mv_hw_ops mv_soc_65n_ops = { + .phy_errata = mv_soc_65n_phy_errata, + .enable_leds = mv_soc_enable_leds, + .reset_hc = mv_soc_reset_hc, + .reset_flash = mv_soc_reset_flash, + .reset_bus = mv_soc_reset_bus, +}; + /* * Functions */ @@ -3397,6 +3411,53 @@ static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio) return; } +static void mv_soc_65n_phy_errata(struct mv_host_priv *hpriv, + void __iomem *mmio, unsigned int port) +{ + void __iomem *port_mmio = mv_port_base(mmio, port); + u32 reg; + + reg = readl(port_mmio + PHY_MODE3); + reg &= ~(0x3 << 27); /* SELMUPF (bits 28:27) to 1 */ + reg |= (0x1 << 27); + reg &= ~(0x3 << 29); /* SELMUPI (bits 30:29) to 1 */ + reg |= (0x1 << 29); + writel(reg, port_mmio + PHY_MODE3); + + reg = readl(port_mmio + PHY_MODE4); + reg &= ~0x1; /* SATU_OD8 (bit 0) to 0, reserved bit 16 must be set */ + reg |= (0x1 << 16); + writel(reg, port_mmio + PHY_MODE4); + + reg = readl(port_mmio + PHY_MODE9_GEN2); + reg &= ~0xf; /* TXAMP[3:0] (bits 3:0) to 8 */ + reg |= 0x8; + reg &= ~(0x1 << 14); /* TXAMP[4] (bit 14) to 0 */ + writel(reg, port_mmio + PHY_MODE9_GEN2); + + reg = readl(port_mmio + PHY_MODE9_GEN1); + reg &= ~0xf; /* 
TXAMP[3:0] (bits 3:0) to 8 */ + reg |= 0x8; + reg &= ~(0x1 << 14); /* TXAMP[4] (bit 14) to 0 */ + writel(reg, port_mmio + PHY_MODE9_GEN1); +} + +/** + * soc_is_65 - check if the soc is 65 nano device + * + * Detect the type of the SoC, this is done by reading the PHYCFG_OFS + * register, this register should contain non-zero value and it exists only + * in the 65 nano devices, when reading it from older devices we get 0. + */ +static bool soc_is_65n(struct mv_host_priv *hpriv) +{ + void __iomem *port0_mmio = mv_port_base(hpriv->base, 0); + + if (readl(port0_mmio + PHYCFG_OFS)) + return true; + return false; +} + static void mv_setup_ifcfg(void __iomem *port_mmio, int want_gen2i) { u32 ifcfg = readl(port_mmio + SATA_IFCFG); @@ -3737,7 +3798,10 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx) } break; case chip_soc: - hpriv->ops = &mv_soc_ops; + if (soc_is_65n(hpriv)) + hpriv->ops = &mv_soc_65n_ops; + else + hpriv->ops = &mv_soc_ops; hp_flags |= MV_HP_FLAG_SOC | MV_HP_GEN_IIE | MV_HP_ERRATA_60X1C0; break; @@ -3800,7 +3864,8 @@ static int mv_init_host(struct ata_host *host, unsigned int board_idx) n_hc = mv_get_hc_count(host->ports[0]->flags); for (port = 0; port < host->n_ports; port++) - hpriv->ops->read_preamp(hpriv, port, mmio); + if (hpriv->ops->read_preamp) + hpriv->ops->read_preamp(hpriv, port, mmio); rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc); if (rc) diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c index dce3dccced3..eb05a3c82a9 100644 --- a/drivers/ata/sata_sx4.c +++ b/drivers/ata/sata_sx4.c @@ -213,8 +213,9 @@ struct pdc_host_priv { static int pdc_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); -static void pdc_eng_timeout(struct ata_port *ap); -static void pdc_20621_phy_reset(struct ata_port *ap); +static void pdc_error_handler(struct ata_port *ap); +static void pdc_freeze(struct ata_port *ap); +static void pdc_thaw(struct ata_port *ap); static int pdc_port_start(struct ata_port *ap); static void pdc20621_qc_prep(struct ata_queued_cmd *qc); static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf); @@ -233,6 +234,10 @@ static void pdc20621_put_to_dimm(struct ata_host *host, void *psource, u32 offset, u32 size); static void pdc20621_irq_clear(struct ata_port *ap); static unsigned int pdc20621_qc_issue(struct ata_queued_cmd *qc); +static int pdc_softreset(struct ata_link *link, unsigned int *class, + unsigned long deadline); +static void pdc_post_internal_cmd(struct ata_queued_cmd *qc); +static int pdc_check_atapi_dma(struct ata_queued_cmd *qc); static struct scsi_host_template pdc_sata_sht = { @@ -243,20 +248,24 @@ static struct scsi_host_template pdc_sata_sht = { /* TODO: inherit from base port_ops after converting to new EH */ static struct ata_port_operations pdc_20621_ops = { - .sff_tf_load = pdc_tf_load_mmio, - .sff_tf_read = ata_sff_tf_read, - .sff_check_status = ata_sff_check_status, - .sff_exec_command = pdc_exec_command_mmio, - .sff_dev_select = ata_sff_dev_select, - .phy_reset = pdc_20621_phy_reset, + .inherits = &ata_sff_port_ops, + + .check_atapi_dma = pdc_check_atapi_dma, .qc_prep = pdc20621_qc_prep, .qc_issue = pdc20621_qc_issue, - .qc_fill_rtf = ata_sff_qc_fill_rtf, - .sff_data_xfer = ata_sff_data_xfer, - .eng_timeout = pdc_eng_timeout, - .sff_irq_clear = pdc20621_irq_clear, - .sff_irq_on = ata_sff_irq_on, + + .freeze = pdc_freeze, + .thaw = pdc_thaw, + .softreset = pdc_softreset, + .error_handler = pdc_error_handler, + .lost_interrupt = ATA_OP_NULL, + .post_internal_cmd = 
pdc_post_internal_cmd, + .port_start = pdc_port_start, + + .sff_tf_load = pdc_tf_load_mmio, + .sff_exec_command = pdc_exec_command_mmio, + .sff_irq_clear = pdc20621_irq_clear, }; static const struct ata_port_info pdc_port_info[] = { @@ -310,14 +319,6 @@ static int pdc_port_start(struct ata_port *ap) return 0; } -static void pdc_20621_phy_reset(struct ata_port *ap) -{ - VPRINTK("ENTER\n"); - ap->cbl = ATA_CBL_SATA; - ata_port_probe(ap); - ata_bus_reset(ap); -} - static inline void pdc20621_ata_sg(struct ata_taskfile *tf, u8 *buf, unsigned int portno, unsigned int total_len) @@ -686,8 +687,11 @@ static void pdc20621_packet_start(struct ata_queued_cmd *qc) static unsigned int pdc20621_qc_issue(struct ata_queued_cmd *qc) { switch (qc->tf.protocol) { - case ATA_PROT_DMA: case ATA_PROT_NODATA: + if (qc->tf.flags & ATA_TFLAG_POLLING) + break; + /*FALLTHROUGH*/ + case ATA_PROT_DMA: pdc20621_packet_start(qc); return 0; @@ -786,12 +790,7 @@ static inline unsigned int pdc20621_host_intr(struct ata_port *ap, static void pdc20621_irq_clear(struct ata_port *ap) { - struct ata_host *host = ap->host; - void __iomem *mmio = host->iomap[PDC_MMIO_BAR]; - - mmio += PDC_CHIP0_OFS; - - readl(mmio + PDC_20621_SEQMASK); + ioread8(ap->ioaddr.status_addr); } static irqreturn_t pdc20621_interrupt(int irq, void *dev_instance) @@ -859,46 +858,119 @@ static irqreturn_t pdc20621_interrupt(int irq, void *dev_instance) return IRQ_RETVAL(handled); } -static void pdc_eng_timeout(struct ata_port *ap) +static void pdc_freeze(struct ata_port *ap) { - u8 drv_stat; - struct ata_host *host = ap->host; - struct ata_queued_cmd *qc; - unsigned long flags; + void __iomem *mmio = ap->ioaddr.cmd_addr; + u32 tmp; - DPRINTK("ENTER\n"); + /* FIXME: if all 4 ATA engines are stopped, also stop HDMA engine */ - spin_lock_irqsave(&host->lock, flags); + tmp = readl(mmio + PDC_CTLSTAT); + tmp |= PDC_MASK_INT; + tmp &= ~PDC_DMA_ENABLE; + writel(tmp, mmio + PDC_CTLSTAT); + readl(mmio + PDC_CTLSTAT); /* flush */ +} - qc = ata_qc_from_tag(ap, ap->link.active_tag); +static void pdc_thaw(struct ata_port *ap) +{ + void __iomem *mmio = ap->ioaddr.cmd_addr; + u32 tmp; - switch (qc->tf.protocol) { - case ATA_PROT_DMA: - case ATA_PROT_NODATA: - ata_port_printk(ap, KERN_ERR, "command timeout\n"); - qc->err_mask |= __ac_err_mask(ata_wait_idle(ap)); - break; + /* FIXME: start HDMA engine, if zero ATA engines running */ - default: - drv_stat = ata_sff_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000); + /* clear IRQ */ + ioread8(ap->ioaddr.status_addr); - ata_port_printk(ap, KERN_ERR, - "unknown timeout, cmd 0x%x stat 0x%x\n", - qc->tf.command, drv_stat); + /* turn IRQ back on */ + tmp = readl(mmio + PDC_CTLSTAT); + tmp &= ~PDC_MASK_INT; + writel(tmp, mmio + PDC_CTLSTAT); + readl(mmio + PDC_CTLSTAT); /* flush */ +} - qc->err_mask |= ac_err_mask(drv_stat); - break; +static void pdc_reset_port(struct ata_port *ap) +{ + void __iomem *mmio = ap->ioaddr.cmd_addr + PDC_CTLSTAT; + unsigned int i; + u32 tmp; + + /* FIXME: handle HDMA copy engine */ + + for (i = 11; i > 0; i--) { + tmp = readl(mmio); + if (tmp & PDC_RESET) + break; + + udelay(100); + + tmp |= PDC_RESET; + writel(tmp, mmio); } - spin_unlock_irqrestore(&host->lock, flags); - ata_eh_qc_complete(qc); - DPRINTK("EXIT\n"); + tmp &= ~PDC_RESET; + writel(tmp, mmio); + readl(mmio); /* flush */ +} + +static int pdc_softreset(struct ata_link *link, unsigned int *class, + unsigned long deadline) +{ + pdc_reset_port(link->ap); + return ata_sff_softreset(link, class, deadline); +} + +static void pdc_error_handler(struct 
ata_port *ap) +{ + if (!(ap->pflags & ATA_PFLAG_FROZEN)) + pdc_reset_port(ap); + + ata_std_error_handler(ap); +} + +static void pdc_post_internal_cmd(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + + /* make DMA engine forget about the failed command */ + if (qc->flags & ATA_QCFLAG_FAILED) + pdc_reset_port(ap); +} + +static int pdc_check_atapi_dma(struct ata_queued_cmd *qc) +{ + u8 *scsicmd = qc->scsicmd->cmnd; + int pio = 1; /* atapi dma off by default */ + + /* Whitelist commands that may use DMA. */ + switch (scsicmd[0]) { + case WRITE_12: + case WRITE_10: + case WRITE_6: + case READ_12: + case READ_10: + case READ_6: + case 0xad: /* READ_DVD_STRUCTURE */ + case 0xbe: /* READ_CD */ + pio = 0; + } + /* -45150 (FFFF4FA2) to -1 (FFFFFFFF) shall use PIO mode */ + if (scsicmd[0] == WRITE_10) { + unsigned int lba = + (scsicmd[2] << 24) | + (scsicmd[3] << 16) | + (scsicmd[4] << 8) | + scsicmd[5]; + if (lba >= 0xFFFF4FA2) + pio = 1; + } + return pio; } static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf) { WARN_ON(tf->protocol == ATA_PROT_DMA || - tf->protocol == ATA_PROT_NODATA); + tf->protocol == ATAPI_PROT_DMA); ata_sff_tf_load(ap, tf); } @@ -906,7 +978,7 @@ static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf) static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf) { WARN_ON(tf->protocol == ATA_PROT_DMA || - tf->protocol == ATA_PROT_NODATA); + tf->protocol == ATAPI_PROT_DMA); ata_sff_exec_command(ap, tf); } diff --git a/drivers/base/bus.c b/drivers/base/bus.c index dc030f1f00f..c6599618523 100644 --- a/drivers/base/bus.c +++ b/drivers/base/bus.c @@ -700,8 +700,10 @@ int bus_add_driver(struct device_driver *drv) } kobject_uevent(&priv->kobj, KOBJ_ADD); - return error; + return 0; out_unregister: + kfree(drv->p); + drv->p = NULL; kobject_put(&priv->kobj); out_put_bus: bus_put(bus); diff --git a/drivers/base/core.c b/drivers/base/core.c index 4aa527b8a91..1977d4beb89 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -879,7 +879,7 @@ int device_add(struct device *dev) } if (!dev_name(dev)) - goto done; + goto name_error; pr_debug("device: '%s': %s\n", dev_name(dev), __func__); @@ -978,6 +978,9 @@ done: cleanup_device_parent(dev); if (parent) put_device(parent); +name_error: + kfree(dev->p); + dev->p = NULL; goto done; } diff --git a/drivers/base/driver.c b/drivers/base/driver.c index c51f11bb29a..8ae0f63602e 100644 --- a/drivers/base/driver.c +++ b/drivers/base/driver.c @@ -257,6 +257,10 @@ EXPORT_SYMBOL_GPL(driver_register); */ void driver_unregister(struct device_driver *drv) { + if (!drv || !drv->p) { + WARN(1, "Unexpected driver unregister!\n"); + return; + } driver_remove_groups(drv, drv->groups); bus_remove_driver(drv); } diff --git a/drivers/base/platform.c b/drivers/base/platform.c index b5b6c973a2e..8b4708e0624 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c @@ -217,7 +217,6 @@ int platform_device_add_data(struct platform_device *pdev, const void *data, if (d) { memcpy(d, data, size); pdev->dev.platform_data = d; - pdev->platform_data = d; } return d ? 0 : -ENOMEM; } @@ -247,21 +246,6 @@ int platform_device_add(struct platform_device *pdev) else dev_set_name(&pdev->dev, pdev->name); - /* We will remove platform_data field from struct device - * if all platform devices pass its platform specific data - * from platform_device. 
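/*
 * [Editor's note, not part of the patch] The drivers/base/platform.c hunks
 * here drop the duplicated pdev->platform_data pointer and keep only
 * pdev->dev.platform_data, which platform_device_add_data() fills with a
 * private copy of the caller's data. A hedged sketch of the usual
 * registration flow with that helper (the device name "demo-dev" and the
 * payload struct are invented for illustration):
 */
#include <linux/platform_device.h>

struct demo_pdata {
	int gpio;
};

static int __init demo_register(void)
{
	struct demo_pdata pdata = { .gpio = 42 };
	struct platform_device *pdev;
	int ret;

	pdev = platform_device_alloc("demo-dev", -1);
	if (!pdev)
		return -ENOMEM;

	/* copies pdata; the copy ends up in pdev->dev.platform_data */
	ret = platform_device_add_data(pdev, &pdata, sizeof(pdata));
	if (!ret)
		ret = platform_device_add(pdev);
	if (ret)
		platform_device_put(pdev);
	return ret;
}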
The conversion is going to be a - * long time, so we allow the two cases coexist to make - * this kind of fix more easily*/ - if (pdev->platform_data && pdev->dev.platform_data) { - printk(KERN_ERR - "%s: use which platform_data?\n", - dev_name(&pdev->dev)); - } else if (pdev->platform_data) { - pdev->dev.platform_data = pdev->platform_data; - } else if (pdev->dev.platform_data) { - pdev->platform_data = pdev->dev.platform_data; - } - for (i = 0; i < pdev->num_resources; i++) { struct resource *p, *r = &pdev->resource[i]; @@ -1028,7 +1012,7 @@ static __initdata LIST_HEAD(early_platform_device_list); /** * early_platform_driver_register - * @edrv: early_platform driver structure + * @epdrv: early_platform driver structure * @buf: string passed from early_param() */ int __init early_platform_driver_register(struct early_platform_driver *epdrv, @@ -1112,7 +1096,7 @@ void __init early_platform_driver_register_all(char *class_str) /** * early_platform_match - * @edrv: early platform driver structure + * @epdrv: early platform driver structure * @id: id to match against */ static __init struct platform_device * @@ -1130,7 +1114,7 @@ early_platform_match(struct early_platform_driver *epdrv, int id) /** * early_platform_left - * @edrv: early platform driver structure + * @epdrv: early platform driver structure * @id: return true if id or above exists */ static __init int early_platform_left(struct early_platform_driver *epdrv, diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 69b4ddb7de3..3e4bc699bc0 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -357,6 +357,7 @@ static void dpm_power_up(pm_message_t state) { struct device *dev; + mutex_lock(&dpm_list_mtx); list_for_each_entry(dev, &dpm_list, power.entry) if (dev->power.status > DPM_OFF) { int error; @@ -366,6 +367,7 @@ static void dpm_power_up(pm_message_t state) if (error) pm_dev_err(dev, state, " early", error); } + mutex_unlock(&dpm_list_mtx); } /** @@ -614,6 +616,7 @@ int device_power_down(pm_message_t state) int error = 0; suspend_device_irqs(); + mutex_lock(&dpm_list_mtx); list_for_each_entry_reverse(dev, &dpm_list, power.entry) { error = suspend_device_noirq(dev, state); if (error) { @@ -622,6 +625,7 @@ int device_power_down(pm_message_t state) } dev->power.status = DPM_OFF_IRQ; } + mutex_unlock(&dpm_list_mtx); if (error) device_power_up(resume_event(state)); return error; diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 8f905089b72..a6cbf7b808e 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -934,8 +934,6 @@ static void blkfront_closing(struct xenbus_device *dev) spin_lock_irqsave(&blkif_io_lock, flags); - del_gendisk(info->gd); - /* No more blkif_request(). 
*/ blk_stop_queue(info->rq); @@ -949,6 +947,8 @@ static void blkfront_closing(struct xenbus_device *dev) blk_cleanup_queue(info->rq); info->rq = NULL; + del_gendisk(info->gd); + out: xenbus_frontend_closed(dev); } @@ -977,8 +977,10 @@ static void backend_changed(struct xenbus_device *dev, break; case XenbusStateClosing: - if (info->gd == NULL) - xenbus_dev_fatal(dev, -ENODEV, "gd is NULL"); + if (info->gd == NULL) { + xenbus_frontend_closed(dev); + break; + } bd = bdget_disk(info->gd, 0); if (bd == NULL) xenbus_dev_fatal(dev, -ENODEV, "bdget failed"); diff --git a/drivers/cdrom/viocd.c b/drivers/cdrom/viocd.c index 13929356135..9b1624e0dde 100644 --- a/drivers/cdrom/viocd.c +++ b/drivers/cdrom/viocd.c @@ -587,7 +587,7 @@ static int viocd_probe(struct vio_dev *vdev, const struct vio_device_id *id) struct device_node *node = vdev->dev.archdata.of_node; deviceno = vdev->unit_address; - if (deviceno > VIOCD_MAX_CD) + if (deviceno >= VIOCD_MAX_CD) return -ENODEV; if (!node) return -ENODEV; diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c index aa83a0865ec..09050797c76 100644 --- a/drivers/char/ipmi/ipmi_msghandler.c +++ b/drivers/char/ipmi/ipmi_msghandler.c @@ -2856,6 +2856,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers, /* Assume a single IPMB channel at zero. */ intf->channels[0].medium = IPMI_CHANNEL_MEDIUM_IPMB; intf->channels[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB; + intf->curr_channel = IPMI_MAX_CHANNELS; } if (rv == 0) @@ -3648,13 +3649,13 @@ static int handle_new_recv_msg(ipmi_smi_t intf, } /* - ** We need to make sure the channels have been initialized. - ** The channel_handler routine will set the "curr_channel" - ** equal to or greater than IPMI_MAX_CHANNELS when all the - ** channels for this interface have been initialized. - */ + * We need to make sure the channels have been initialized. + * The channel_handler routine will set the "curr_channel" + * equal to or greater than IPMI_MAX_CHANNELS when all the + * channels for this interface have been initialized. + */ if (intf->curr_channel < IPMI_MAX_CHANNELS) { - requeue = 1; /* Just put the message back for now */ + requeue = 0; /* Throw the message away */ goto out; } diff --git a/drivers/char/mem.c b/drivers/char/mem.c index 8f05c38c2f0..65e12bca657 100644 --- a/drivers/char/mem.c +++ b/drivers/char/mem.c @@ -694,6 +694,9 @@ static ssize_t read_zero(struct file * file, char __user * buf, written += chunk - unwritten; if (unwritten) break; + /* Consider changing this to just 'signal_pending()' with lots of testing */ + if (fatal_signal_pending(current)) + return written ? 
written : -EINTR; buf += chunk; count -= chunk; cond_resched(); diff --git a/drivers/char/mxser.c b/drivers/char/mxser.c index a420e8d437d..13f8871e5b2 100644 --- a/drivers/char/mxser.c +++ b/drivers/char/mxser.c @@ -2711,7 +2711,7 @@ static int __init mxser_module_init(void) continue; brd = &mxser_boards[m]; - retval = mxser_get_ISA_conf(!ioaddr[b], brd); + retval = mxser_get_ISA_conf(ioaddr[b], brd); if (retval <= 0) { brd->info = NULL; continue; diff --git a/drivers/char/random.c b/drivers/char/random.c index b2ced39d76b..8c7444857a4 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -1673,7 +1673,7 @@ unsigned int get_random_int(void) int ret; keyptr = get_keyptr(); - hash[0] += current->pid + jiffies + get_cycles() + (int)(long)&ret; + hash[0] += current->pid + jiffies + get_cycles(); ret = half_md4_transform(hash, keyptr->secret); put_cpu_var(get_random_int_hash); diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c index b0a6a3e5192..d6a807f4077 100644 --- a/drivers/char/sysrq.c +++ b/drivers/char/sysrq.c @@ -406,7 +406,7 @@ static struct sysrq_key_op *sysrq_key_table[36] = { &sysrq_showlocks_op, /* d */ &sysrq_term_op, /* e */ &sysrq_moom_op, /* f */ - /* g: May be registered by ppc for kgdb */ + /* g: May be registered for the kernel debugger */ NULL, /* g */ NULL, /* h - reserved for help */ &sysrq_kill_op, /* i */ @@ -431,7 +431,7 @@ static struct sysrq_key_op *sysrq_key_table[36] = { &sysrq_sync_op, /* s */ &sysrq_showstate_op, /* t */ &sysrq_mountro_op, /* u */ - /* v: May be registered at init time by SMP VOYAGER */ + /* v: May be registered for frame buffer console restore */ NULL, /* v */ &sysrq_showstate_blocked_op, /* w */ /* x: May be registered on ppc/powerpc for xmon */ diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c index ed306eb1057..0c2f55a38b9 100644 --- a/drivers/char/tpm/tpm_bios.c +++ b/drivers/char/tpm/tpm_bios.c @@ -212,7 +212,8 @@ static int get_event_name(char *dest, struct tcpa_event *event, unsigned char * event_entry) { const char *name = ""; - char data[40] = ""; + /* 41 so there is room for 40 data and 1 nul */ + char data[41] = ""; int i, n_len = 0, d_len = 0; struct tcpa_pc_event *pc_event; diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index d270e8eb3e6..6e2ec0b1894 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -808,7 +808,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev) ret = -ENOMEM; goto nomem_out; } - if (!alloc_cpumask_var(&policy->related_cpus, GFP_KERNEL)) { + if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL)) { free_cpumask_var(policy->cpus); kfree(policy); ret = -ENOMEM; @@ -1070,11 +1070,11 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev) spin_unlock_irqrestore(&cpufreq_driver_lock, flags); #endif + unlock_policy_rwsem_write(cpu); + if (cpufreq_driver->target) __cpufreq_governor(data, CPUFREQ_GOV_STOP); - unlock_policy_rwsem_write(cpu); - kobject_put(&data->kobj); /* we need to make sure that the underlying kobj is actually diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c index 2ecd95e4ab1..7a74d175287 100644 --- a/drivers/cpufreq/cpufreq_conservative.c +++ b/drivers/cpufreq/cpufreq_conservative.c @@ -91,6 +91,9 @@ static unsigned int dbs_enable; /* number of CPUs using this policy */ * (like __cpufreq_driver_target()) is being called with dbs_mutex taken, then * cpu_hotplug lock should be taken before that. Note that cpu_hotplug lock * is recursive for the same process. 
-Venki + * DEADLOCK ALERT! (2) : do_dbs_timer() must not take the dbs_mutex, because it + * would deadlock with cancel_delayed_work_sync(), which is needed for proper + * raceless workqueue teardown. */ static DEFINE_MUTEX(dbs_mutex); @@ -542,7 +545,7 @@ static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info) static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info) { dbs_info->enable = 0; - cancel_delayed_work(&dbs_info->work); + cancel_delayed_work_sync(&dbs_info->work); } static int cpufreq_governor_dbs(struct cpufreq_policy *policy, diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c index 338f428a15b..e741c339df7 100644 --- a/drivers/cpufreq/cpufreq_ondemand.c +++ b/drivers/cpufreq/cpufreq_ondemand.c @@ -98,6 +98,9 @@ static unsigned int dbs_enable; /* number of CPUs using this policy */ * (like __cpufreq_driver_target()) is being called with dbs_mutex taken, then * cpu_hotplug lock should be taken before that. Note that cpu_hotplug lock * is recursive for the same process. -Venki + * DEADLOCK ALERT! (2) : do_dbs_timer() must not take the dbs_mutex, because it + * would deadlock with cancel_delayed_work_sync(), which is needed for proper + * raceless workqueue teardown. */ static DEFINE_MUTEX(dbs_mutex); @@ -562,7 +565,7 @@ static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info) static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info) { dbs_info->enable = 0; - cancel_delayed_work(&dbs_info->work); + cancel_delayed_work_sync(&dbs_info->work); } static int cpufreq_governor_dbs(struct cpufreq_policy *policy, diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c index f9f05d7a707..6c6656d3b1e 100644 --- a/drivers/crypto/ixp4xx_crypto.c +++ b/drivers/crypto/ixp4xx_crypto.c @@ -415,6 +415,7 @@ static void crypto_done_action(unsigned long arg) static int init_ixp_crypto(void) { int ret = -ENODEV; + u32 msg[2] = { 0, 0 }; if (! 
( ~(*IXP4XX_EXP_CFG2) & (IXP4XX_FEATURE_HASH | IXP4XX_FEATURE_AES | IXP4XX_FEATURE_DES))) { @@ -426,9 +427,35 @@ static int init_ixp_crypto(void) return ret; if (!npe_running(npe_c)) { - npe_load_firmware(npe_c, npe_name(npe_c), dev); + ret = npe_load_firmware(npe_c, npe_name(npe_c), dev); + if (ret) { + return ret; + } + if (npe_recv_message(npe_c, msg, "STATUS_MSG")) + goto npe_error; + } else { + if (npe_send_message(npe_c, msg, "STATUS_MSG")) + goto npe_error; + + if (npe_recv_message(npe_c, msg, "STATUS_MSG")) + goto npe_error; } + switch ((msg[1]>>16) & 0xff) { + case 3: + printk(KERN_WARNING "Firmware of %s lacks AES support\n", + npe_name(npe_c)); + support_aes = 0; + break; + case 4: + case 5: + support_aes = 1; + break; + default: + printk(KERN_ERR "Firmware of %s lacks crypto support\n", + npe_name(npe_c)); + return -ENODEV; + } /* buffer_pool will also be used to sometimes store the hmac, * so assure it is large enough */ @@ -459,6 +486,10 @@ static int init_ixp_crypto(void) qmgr_enable_irq(RECV_QID); return 0; + +npe_error: + printk(KERN_ERR "%s not responding\n", npe_name(npe_c)); + ret = -EIO; err: if (ctx_pool) dma_pool_destroy(ctx_pool); diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c index 3f0fdd18255..856b3cc2558 100644 --- a/drivers/crypto/padlock-aes.c +++ b/drivers/crypto/padlock-aes.c @@ -489,4 +489,4 @@ MODULE_DESCRIPTION("VIA PadLock AES algorithm support"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Michal Ludvig"); -MODULE_ALIAS("aes-all"); +MODULE_ALIAS("aes"); diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 92438e9dacc..5a87384ea4f 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -804,11 +804,14 @@ dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest, dma_addr_t dma_dest, dma_src; dma_cookie_t cookie; int cpu; + unsigned long flags; dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE); dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE); - tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, - DMA_CTRL_ACK); + flags = DMA_CTRL_ACK | + DMA_COMPL_SRC_UNMAP_SINGLE | + DMA_COMPL_DEST_UNMAP_SINGLE; + tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags); if (!tx) { dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE); @@ -850,11 +853,12 @@ dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page, dma_addr_t dma_dest, dma_src; dma_cookie_t cookie; int cpu; + unsigned long flags; dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE); dma_dest = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE); - tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, - DMA_CTRL_ACK); + flags = DMA_CTRL_ACK | DMA_COMPL_SRC_UNMAP_SINGLE; + tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags); if (!tx) { dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE); @@ -898,12 +902,13 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg, dma_addr_t dma_dest, dma_src; dma_cookie_t cookie; int cpu; + unsigned long flags; dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE); dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len, DMA_FROM_DEVICE); - tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, - DMA_CTRL_ACK); + flags = DMA_CTRL_ACK; + tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags); if (!tx) { dma_unmap_page(dev->dev, dma_src, len, DMA_TO_DEVICE); diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index a27c0fb1bc1..fb7da5141e9 100644 --- 
a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -531,9 +531,7 @@ static int __init dmatest_init(void) chan = dma_request_channel(mask, filter, NULL); if (chan) { err = dmatest_add_channel(chan); - if (err == 0) - continue; - else { + if (err) { dma_release_channel(chan); break; /* add_channel failed, punt */ } diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index da8a8ed9e41..f18d1bde043 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c @@ -179,9 +179,14 @@ static void dma_halt(struct fsl_dma_chan *fsl_chan) static void set_ld_eol(struct fsl_dma_chan *fsl_chan, struct fsl_desc_sw *desc) { + u64 snoop_bits; + + snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX) + ? FSL_DMA_SNEN : 0; + desc->hw.next_ln_addr = CPU_TO_DMA(fsl_chan, - DMA_TO_CPU(fsl_chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL, - 64); + DMA_TO_CPU(fsl_chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL + | snoop_bits, 64); } static void append_ld_queue(struct fsl_dma_chan *fsl_chan, @@ -313,8 +318,8 @@ static void fsl_chan_toggle_ext_start(struct fsl_dma_chan *fsl_chan, int enable) static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx) { - struct fsl_desc_sw *desc = tx_to_fsl_desc(tx); struct fsl_dma_chan *fsl_chan = to_fsl_chan(tx->chan); + struct fsl_desc_sw *desc; unsigned long flags; dma_cookie_t cookie; @@ -322,14 +327,17 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx) spin_lock_irqsave(&fsl_chan->desc_lock, flags); cookie = fsl_chan->common.cookie; - cookie++; - if (cookie < 0) - cookie = 1; - desc->async_tx.cookie = cookie; - fsl_chan->common.cookie = desc->async_tx.cookie; + list_for_each_entry(desc, &tx->tx_list, node) { + cookie++; + if (cookie < 0) + cookie = 1; - append_ld_queue(fsl_chan, desc); - list_splice_init(&desc->async_tx.tx_list, fsl_chan->ld_queue.prev); + desc->async_tx.cookie = cookie; + } + + fsl_chan->common.cookie = cookie; + append_ld_queue(fsl_chan, tx_to_fsl_desc(tx)); + list_splice_init(&tx->tx_list, fsl_chan->ld_queue.prev); spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); @@ -454,8 +462,8 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy( { struct fsl_dma_chan *fsl_chan; struct fsl_desc_sw *first = NULL, *prev = NULL, *new; + struct list_head *list; size_t copy; - LIST_HEAD(link_chain); if (!chan) return NULL; @@ -472,7 +480,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy( if (!new) { dev_err(fsl_chan->dev, "No free memory for link descriptor\n"); - return NULL; + goto fail; } #ifdef FSL_DMA_LD_DEBUG dev_dbg(fsl_chan->dev, "new link desc alloc %p\n", new); @@ -507,7 +515,19 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy( /* Set End-of-link to the last link descriptor of new list*/ set_ld_eol(fsl_chan, new); - return first ? &first->async_tx : NULL; + return &first->async_tx; + +fail: + if (!first) + return NULL; + + list = &first->async_tx.tx_list; + list_for_each_entry_safe_reverse(new, prev, list, node) { + list_del(&new->node); + dma_pool_free(fsl_chan->desc_pool, new, new->async_tx.phys); + } + + return NULL; } /** @@ -598,15 +618,16 @@ static void fsl_chan_xfer_ld_queue(struct fsl_dma_chan *fsl_chan) dma_addr_t next_dest_addr; unsigned long flags; + spin_lock_irqsave(&fsl_chan->desc_lock, flags); + if (!dma_is_idle(fsl_chan)) - return; + goto out_unlock; dma_halt(fsl_chan); /* If there are some link descriptors * not transfered in queue. We need to start it. 
*/ - spin_lock_irqsave(&fsl_chan->desc_lock, flags); /* Find the first un-transfer desciptor */ for (ld_node = fsl_chan->ld_queue.next; @@ -617,19 +638,20 @@ static void fsl_chan_xfer_ld_queue(struct fsl_dma_chan *fsl_chan) fsl_chan->common.cookie) == DMA_SUCCESS); ld_node = ld_node->next); - spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); - if (ld_node != &fsl_chan->ld_queue) { /* Get the ld start address from ld_queue */ next_dest_addr = to_fsl_desc(ld_node)->async_tx.phys; - dev_dbg(fsl_chan->dev, "xfer LDs staring from %p\n", - (void *)next_dest_addr); + dev_dbg(fsl_chan->dev, "xfer LDs staring from 0x%llx\n", + (unsigned long long)next_dest_addr); set_cdar(fsl_chan, next_dest_addr); dma_start(fsl_chan); } else { set_cdar(fsl_chan, 0); set_ndar(fsl_chan, 0); } + +out_unlock: + spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); } /** @@ -734,8 +756,9 @@ static irqreturn_t fsl_dma_chan_do_interrupt(int irq, void *data) */ if (stat & FSL_DMA_SR_EOSI) { dev_dbg(fsl_chan->dev, "event: End-of-segments INT\n"); - dev_dbg(fsl_chan->dev, "event: clndar %p, nlndar %p\n", - (void *)get_cdar(fsl_chan), (void *)get_ndar(fsl_chan)); + dev_dbg(fsl_chan->dev, "event: clndar 0x%llx, nlndar 0x%llx\n", + (unsigned long long)get_cdar(fsl_chan), + (unsigned long long)get_ndar(fsl_chan)); stat &= ~FSL_DMA_SR_EOSI; update_cookie = 1; } @@ -830,7 +853,7 @@ static int __devinit fsl_dma_chan_probe(struct fsl_dma_device *fdev, new_fsl_chan->reg.end - new_fsl_chan->reg.start + 1); new_fsl_chan->id = ((new_fsl_chan->reg.start - 0x100) & 0xfff) >> 7; - if (new_fsl_chan->id > FSL_DMA_MAX_CHANS_PER_DEVICE) { + if (new_fsl_chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) { dev_err(fdev->dev, "There is no %d channel!\n", new_fsl_chan->id); err = -EINVAL; @@ -925,8 +948,8 @@ static int __devinit of_fsl_dma_probe(struct of_device *dev, } dev_info(&dev->dev, "Probe the Freescale DMA driver for %s " - "controller at %p...\n", - match->compatible, (void *)fdev->reg.start); + "controller at 0x%llx...\n", + match->compatible, (unsigned long long)fdev->reg.start); fdev->reg_base = ioremap(fdev->reg.start, fdev->reg.end - fdev->reg.start + 1); diff --git a/drivers/dma/ioat_dma.c b/drivers/dma/ioat_dma.c index e4fc33c1c32..a600fc0f796 100644 --- a/drivers/dma/ioat_dma.c +++ b/drivers/dma/ioat_dma.c @@ -173,7 +173,7 @@ static int ioat_dma_enumerate_channels(struct ioatdma_device *device) xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale)); #ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL - if (i7300_idle_platform_probe(NULL, NULL) == 0) { + if (i7300_idle_platform_probe(NULL, NULL, 1) == 0) { device->common.chancnt--; } #endif @@ -1063,22 +1063,31 @@ static void ioat_dma_cleanup_tasklet(unsigned long data) static void ioat_dma_unmap(struct ioat_dma_chan *ioat_chan, struct ioat_desc_sw *desc) { - /* - * yes we are unmapping both _page and _single - * alloc'd regions with unmap_page. Is this - * *really* that bad? 
- */ - if (!(desc->async_tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) - pci_unmap_page(ioat_chan->device->pdev, - pci_unmap_addr(desc, dst), - pci_unmap_len(desc, len), - PCI_DMA_FROMDEVICE); + if (!(desc->async_tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) { + if (desc->async_tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE) + pci_unmap_single(ioat_chan->device->pdev, + pci_unmap_addr(desc, dst), + pci_unmap_len(desc, len), + PCI_DMA_FROMDEVICE); + else + pci_unmap_page(ioat_chan->device->pdev, + pci_unmap_addr(desc, dst), + pci_unmap_len(desc, len), + PCI_DMA_FROMDEVICE); + } - if (!(desc->async_tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) - pci_unmap_page(ioat_chan->device->pdev, - pci_unmap_addr(desc, src), - pci_unmap_len(desc, len), - PCI_DMA_TODEVICE); + if (!(desc->async_tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) { + if (desc->async_tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE) + pci_unmap_single(ioat_chan->device->pdev, + pci_unmap_addr(desc, src), + pci_unmap_len(desc, len), + PCI_DMA_TODEVICE); + else + pci_unmap_page(ioat_chan->device->pdev, + pci_unmap_addr(desc, src), + pci_unmap_len(desc, len), + PCI_DMA_TODEVICE); + } } /** @@ -1363,6 +1372,7 @@ static int ioat_dma_self_test(struct ioatdma_device *device) int err = 0; struct completion cmp; unsigned long tmo; + unsigned long flags; src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL); if (!src) @@ -1392,8 +1402,9 @@ static int ioat_dma_self_test(struct ioatdma_device *device) DMA_TO_DEVICE); dma_dest = dma_map_single(dma_chan->device->dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE); + flags = DMA_COMPL_SRC_UNMAP_SINGLE | DMA_COMPL_DEST_UNMAP_SINGLE; tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src, - IOAT_TEST_SIZE, 0); + IOAT_TEST_SIZE, flags); if (!tx) { dev_err(&device->pdev->dev, "Self-test prep failed, disabling\n"); diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c index e202a6ce557..9a5bc1a7389 100644 --- a/drivers/dma/ipu/ipu_idmac.c +++ b/drivers/dma/ipu/ipu_idmac.c @@ -1272,7 +1272,8 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id) /* Other interrupts do not interfere with this channel */ spin_lock(&ichan->lock); if (unlikely(chan_id != IDMAC_SDC_0 && chan_id != IDMAC_SDC_1 && - ((curbuf >> chan_id) & 1) == ichan->active_buffer)) { + ((curbuf >> chan_id) & 1) == ichan->active_buffer && + !list_is_last(ichan->queue.next, &ichan->queue))) { int i = 100; /* This doesn't help. See comment in ipu_disable_channel() */ @@ -1547,7 +1548,7 @@ static irqreturn_t ic_sof_irq(int irq, void *dev_id) struct idmac_channel *ichan = dev_id; printk(KERN_DEBUG "Got SOF IRQ %d on Channel %d\n", irq, ichan->dma_chan.chan_id); - disable_irq(irq); + disable_irq_nosync(irq); return IRQ_HANDLED; } @@ -1556,7 +1557,7 @@ static irqreturn_t ic_eof_irq(int irq, void *dev_id) struct idmac_channel *ichan = dev_id; printk(KERN_DEBUG "Got EOF IRQ %d on Channel %d\n", irq, ichan->dma_chan.chan_id); - disable_irq(irq); + disable_irq_nosync(irq); return IRQ_HANDLED; } diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig index e5f5c5a8ba6..956982f8739 100644 --- a/drivers/edac/Kconfig +++ b/drivers/edac/Kconfig @@ -192,16 +192,20 @@ config EDAC_PPC4XX config EDAC_AMD8131 tristate "AMD8131 HyperTransport PCI-X Tunnel" - depends on EDAC_MM_EDAC && PCI + depends on EDAC_MM_EDAC && PCI && PPC_MAPLE help Support for error detection and correction on the AMD8131 HyperTransport PCI-X Tunnel chip. + Note, add more Kconfig dependency if it's adopted + on some machine other than Maple. 
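Note on the dmaengine/ioat unmap changes above: a client that maps a buffer with dma_map_single() must now say so through the new DMA_COMPL_*_UNMAP_SINGLE flags, so the driver's completion path can call pci_unmap_single() instead of pci_unmap_page(). A minimal illustrative caller is sketched below (the helper name is hypothetical and error handling is trimmed); the async_memcpy helpers earlier in this patch set exactly these flag combinations depending on which side was mapped with dma_map_single().

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>

/* Sketch only: copy a kernel buffer into a page, telling the driver that
 * the source side was a single mapping and the destination was a page
 * mapping, mirroring dma_async_memcpy_buf_to_pg() above. */
static dma_cookie_t sketch_memcpy_buf_to_pg(struct dma_chan *chan, void *src,
					    struct page *dest_pg,
					    unsigned int dest_off, size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_src, dma_dest;
	unsigned long flags;

	dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
	dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len,
				DMA_FROM_DEVICE);

	/* Only the source was mapped with dma_map_single(). */
	flags = DMA_CTRL_ACK | DMA_COMPL_SRC_UNMAP_SINGLE;

	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);
	if (!tx) {
		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
		dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
		return -ENOMEM;
	}

	return tx->tx_submit(tx);
}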
config EDAC_AMD8111 tristate "AMD8111 HyperTransport I/O Hub" - depends on EDAC_MM_EDAC && PCI + depends on EDAC_MM_EDAC && PCI && PPC_MAPLE help Support for error detection and correction on the AMD8111 HyperTransport I/O Hub chip. + Note, add more Kconfig dependency if it's adopted + on some machine other than Maple. endif # EDAC diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile index a5fdcf02f59..59076819135 100644 --- a/drivers/edac/Makefile +++ b/drivers/edac/Makefile @@ -35,3 +35,5 @@ obj-$(CONFIG_EDAC_MPC85XX) += mpc85xx_edac.o obj-$(CONFIG_EDAC_MV64X60) += mv64x60_edac.o obj-$(CONFIG_EDAC_CELL) += cell_edac.o obj-$(CONFIG_EDAC_PPC4XX) += ppc4xx_edac.o +obj-$(CONFIG_EDAC_AMD8111) += amd8111_edac.o +obj-$(CONFIG_EDAC_AMD8131) += amd8131_edac.o diff --git a/drivers/edac/amd8111_edac.c b/drivers/edac/amd8111_edac.c index 61469218112..2cb58ef743e 100644 --- a/drivers/edac/amd8111_edac.c +++ b/drivers/edac/amd8111_edac.c @@ -389,7 +389,7 @@ static int amd8111_dev_probe(struct pci_dev *dev, dev_info->edac_dev->dev = &dev_info->dev->dev; dev_info->edac_dev->mod_name = AMD8111_EDAC_MOD_STR; dev_info->edac_dev->ctl_name = dev_info->ctl_name; - dev_info->edac_dev->dev_name = dev_info->dev->dev.bus_id; + dev_info->edac_dev->dev_name = dev_name(&dev_info->dev->dev); if (edac_op_state == EDAC_OPSTATE_POLL) dev_info->edac_dev->edac_check = dev_info->check; @@ -473,7 +473,7 @@ static int amd8111_pci_probe(struct pci_dev *dev, pci_info->edac_dev->dev = &pci_info->dev->dev; pci_info->edac_dev->mod_name = AMD8111_EDAC_MOD_STR; pci_info->edac_dev->ctl_name = pci_info->ctl_name; - pci_info->edac_dev->dev_name = pci_info->dev->dev.bus_id; + pci_info->edac_dev->dev_name = dev_name(&pci_info->dev->dev); if (edac_op_state == EDAC_OPSTATE_POLL) pci_info->edac_dev->edac_check = pci_info->check; diff --git a/drivers/edac/amd8131_edac.c b/drivers/edac/amd8131_edac.c index c083b31cac5..b432d60c622 100644 --- a/drivers/edac/amd8131_edac.c +++ b/drivers/edac/amd8131_edac.c @@ -287,7 +287,7 @@ static int amd8131_probe(struct pci_dev *dev, const struct pci_device_id *id) dev_info->edac_dev->dev = &dev_info->dev->dev; dev_info->edac_dev->mod_name = AMD8131_EDAC_MOD_STR; dev_info->edac_dev->ctl_name = dev_info->ctl_name; - dev_info->edac_dev->dev_name = dev_info->dev->dev.bus_id; + dev_info->edac_dev->dev_name = dev_name(&dev_info->dev->dev); if (edac_op_state == EDAC_OPSTATE_POLL) dev_info->edac_dev->edac_check = amd8131_chipset.check; diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index 3a22eb9be37..f5d46e7199d 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -67,11 +67,18 @@ config DRM_I830 will load the correct one. config DRM_I915 + tristate "i915 driver" select FB_CFB_FILLRECT select FB_CFB_COPYAREA select FB_CFB_IMAGEBLIT select FB - tristate "i915 driver" + select FRAMEBUFFER_CONSOLE if !EMBEDDED + # i915 depends on ACPI_VIDEO when ACPI is enabled + # but for select to work, need to select ACPI_VIDEO's dependencies, ick + select VIDEO_OUTPUT_CONTROL if ACPI + select BACKLIGHT_CLASS_DEVICE if ACPI + select INPUT if ACPI + select ACPI_VIDEO if ACPI help Choose this option if you have a system that has Intel 830M, 845G, 852GM, 855GM 865G or 915G integrated graphics. 
If M is selected, the diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c index 6d80d17f1e9..0411d912d82 100644 --- a/drivers/gpu/drm/drm_bufs.c +++ b/drivers/gpu/drm/drm_bufs.c @@ -170,6 +170,14 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset, } DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n", (unsigned long long)map->offset, map->size, map->type); + + /* page-align _DRM_SHM maps. They are allocated here so there is no security + * hole created by that and it works around various broken drivers that use + * a non-aligned quantity to map the SAREA. --BenH + */ + if (map->type == _DRM_SHM) + map->size = PAGE_ALIGN(map->size); + if ((map->offset & (~(resource_size_t)PAGE_MASK)) || (map->size & (~PAGE_MASK))) { drm_free(map, sizeof(*map), DRM_MEM_MAPS); return -EINVAL; diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 94a76887173..8fab7890a36 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -2294,7 +2294,12 @@ int drm_mode_connector_property_set_ioctl(struct drm_device *dev, } } - if (connector->funcs->set_property) + /* Do DPMS ourselves */ + if (property == connector->dev->mode_config.dpms_property) { + if (connector->funcs->dpms) + (*connector->funcs->dpms)(connector, (int) out_resp->value); + ret = 0; + } else if (connector->funcs->set_property) ret = connector->funcs->set_property(connector, property, out_resp->value); /* store the property value if succesful */ diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c index 45890447fee..a6f73f1e99d 100644 --- a/drivers/gpu/drm/drm_crtc_helper.c +++ b/drivers/gpu/drm/drm_crtc_helper.c @@ -198,6 +198,29 @@ static void drm_helper_add_std_modes(struct drm_device *dev, } } +/** + * drm_helper_encoder_in_use - check if a given encoder is in use + * @encoder: encoder to check + * + * LOCKING: + * Caller must hold mode config lock. + * + * Walk @encoders's DRM device's mode_config and see if it's in use. + * + * RETURNS: + * True if @encoder is part of the mode_config, false otherwise. + */ +bool drm_helper_encoder_in_use(struct drm_encoder *encoder) +{ + struct drm_connector *connector; + struct drm_device *dev = encoder->dev; + list_for_each_entry(connector, &dev->mode_config.connector_list, head) + if (connector->encoder == encoder) + return true; + return false; +} +EXPORT_SYMBOL(drm_helper_encoder_in_use); + /** * drm_helper_crtc_in_use - check if a given CRTC is in a mode_config * @crtc: CRTC to check @@ -216,7 +239,7 @@ bool drm_helper_crtc_in_use(struct drm_crtc *crtc) struct drm_device *dev = crtc->dev; /* FIXME: Locking around list access? 
*/ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) - if (encoder->crtc == crtc) + if (encoder->crtc == crtc && drm_helper_encoder_in_use(encoder)) return true; return false; } @@ -240,7 +263,7 @@ void drm_helper_disable_unused_functions(struct drm_device *dev) list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { encoder_funcs = encoder->helper_private; - if (!encoder->crtc) + if (!drm_helper_encoder_in_use(encoder)) (*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF); } @@ -935,6 +958,88 @@ bool drm_helper_initial_config(struct drm_device *dev) } EXPORT_SYMBOL(drm_helper_initial_config); +static int drm_helper_choose_encoder_dpms(struct drm_encoder *encoder) +{ + int dpms = DRM_MODE_DPMS_OFF; + struct drm_connector *connector; + struct drm_device *dev = encoder->dev; + + list_for_each_entry(connector, &dev->mode_config.connector_list, head) + if (connector->encoder == encoder) + if (connector->dpms < dpms) + dpms = connector->dpms; + return dpms; +} + +static int drm_helper_choose_crtc_dpms(struct drm_crtc *crtc) +{ + int dpms = DRM_MODE_DPMS_OFF; + struct drm_connector *connector; + struct drm_device *dev = crtc->dev; + + list_for_each_entry(connector, &dev->mode_config.connector_list, head) + if (connector->encoder && connector->encoder->crtc == crtc) + if (connector->dpms < dpms) + dpms = connector->dpms; + return dpms; +} + +/** + * drm_helper_connector_dpms + * @connector affected connector + * @mode DPMS mode + * + * Calls the low-level connector DPMS function, then + * calls appropriate encoder and crtc DPMS functions as well + */ +void drm_helper_connector_dpms(struct drm_connector *connector, int mode) +{ + struct drm_encoder *encoder = connector->encoder; + struct drm_crtc *crtc = encoder ? encoder->crtc : NULL; + int old_dpms; + + if (mode == connector->dpms) + return; + + old_dpms = connector->dpms; + connector->dpms = mode; + + /* from off to on, do crtc then encoder */ + if (mode < old_dpms) { + if (crtc) { + struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; + if (crtc_funcs->dpms) + (*crtc_funcs->dpms) (crtc, + drm_helper_choose_crtc_dpms(crtc)); + } + if (encoder) { + struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; + if (encoder_funcs->dpms) + (*encoder_funcs->dpms) (encoder, + drm_helper_choose_encoder_dpms(encoder)); + } + } + + /* from on to off, do encoder then crtc */ + if (mode > old_dpms) { + if (encoder) { + struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; + if (encoder_funcs->dpms) + (*encoder_funcs->dpms) (encoder, + drm_helper_choose_encoder_dpms(encoder)); + } + if (crtc) { + struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; + if (crtc_funcs->dpms) + (*crtc_funcs->dpms) (crtc, + drm_helper_choose_crtc_dpms(crtc)); + } + } + + return; +} +EXPORT_SYMBOL(drm_helper_connector_dpms); + /** * drm_hotplug_stage_two * @dev DRM device diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index f01def16a66..019b7c57823 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -481,7 +481,7 @@ int drm_ioctl(struct inode *inode, struct file *filp, } retcode = func(dev, kdata, file_priv); - if ((retcode == 0) && (cmd & IOC_OUT)) { + if (cmd & IOC_OUT) { if (copy_to_user((void __user *)arg, kdata, _IOC_SIZE(cmd)) != 0) retcode = -EFAULT; diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index ca9c6165671..6f6b26479d8 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -289,6 +289,11 @@ static 
struct drm_display_mode *drm_mode_detailed(struct drm_device *dev, struct drm_display_mode *mode; struct detailed_pixel_timing *pt = &timing->data.pixel_data; + /* ignore tiny modes */ + if (((pt->hactive_hi << 8) | pt->hactive_lo) < 64 || + ((pt->vactive_hi << 8) | pt->hactive_lo) < 64) + return NULL; + if (pt->stereo) { printk(KERN_WARNING "stereo mode not supported\n"); return NULL; diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c index 93e677a481f..fc8e5acd9d9 100644 --- a/drivers/gpu/drm/drm_irq.c +++ b/drivers/gpu/drm/drm_irq.c @@ -196,6 +196,7 @@ int drm_irq_install(struct drm_device *dev) { int ret = 0; unsigned long sh_flags = 0; + char *irqname; if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) return -EINVAL; @@ -227,8 +228,13 @@ int drm_irq_install(struct drm_device *dev) if (drm_core_check_feature(dev, DRIVER_IRQ_SHARED)) sh_flags = IRQF_SHARED; + if (dev->devname) + irqname = dev->devname; + else + irqname = dev->driver->name; + ret = request_irq(drm_dev_to_irq(dev), dev->driver->irq_handler, - sh_flags, dev->devname, dev); + sh_flags, irqname, dev); if (ret < 0) { mutex_lock(&dev->struct_mutex); diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c index 8f9372921f8..9987ab88083 100644 --- a/drivers/gpu/drm/drm_sysfs.c +++ b/drivers/gpu/drm/drm_sysfs.c @@ -147,7 +147,7 @@ static ssize_t status_show(struct device *device, enum drm_connector_status status; status = connector->funcs->detect(connector); - return snprintf(buf, PAGE_SIZE, "%s", + return snprintf(buf, PAGE_SIZE, "%s\n", drm_get_connector_status_name(status)); } @@ -166,7 +166,7 @@ static ssize_t dpms_show(struct device *device, if (ret) return 0; - return snprintf(buf, PAGE_SIZE, "%s", + return snprintf(buf, PAGE_SIZE, "%s\n", drm_get_dpms_name((int)dpms_status)); } @@ -176,7 +176,7 @@ static ssize_t enabled_show(struct device *device, { struct drm_connector *connector = to_drm_connector(device); - return snprintf(buf, PAGE_SIZE, connector->encoder ? "enabled" : + return snprintf(buf, PAGE_SIZE, "%s\n", connector->encoder ? "enabled" : "disabled"); } @@ -317,6 +317,7 @@ static struct device_attribute connector_attrs_opt1[] = { static struct bin_attribute edid_attr = { .attr.name = "edid", + .attr.mode = 0444, .size = 128, .read = edid_show, }; diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 051134c56ae..0ccb63ee50e 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -987,12 +987,6 @@ static int i915_load_modeset_init(struct drm_device *dev) int fb_bar = IS_I9XX(dev) ? 2 : 0; int ret = 0; - dev->devname = kstrdup(DRIVER_NAME, GFP_KERNEL); - if (!dev->devname) { - ret = -ENOMEM; - goto out; - } - dev->mode_config.fb_base = drm_get_resource_start(dev, fb_bar) & 0xff000000; @@ -1006,17 +1000,25 @@ static int i915_load_modeset_init(struct drm_device *dev) ret = i915_probe_agp(dev, &agp_size, &prealloc_size); if (ret) - goto kfree_devname; + goto out; /* Basic memrange allocator for stolen space (aka vram) */ drm_mm_init(&dev_priv->vram, 0, prealloc_size); - /* Let GEM Manage from end of prealloc space to end of aperture */ - i915_gem_do_init(dev, prealloc_size, agp_size); + /* Let GEM Manage from end of prealloc space to end of aperture. + * + * However, leave one page at the end still bound to the scratch page. 
+ * There are a number of places where the hardware apparently + * prefetches past the end of the object, and we've seen multiple + * hangs with the GPU head pointer stuck in a batchbuffer bound + * at the last page of the aperture. One page should be enough to + * keep any prefetching inside of the aperture. + */ + i915_gem_do_init(dev, prealloc_size, agp_size - 4096); ret = i915_gem_init_ringbuffer(dev); if (ret) - goto kfree_devname; + goto out; /* Allow hardware batchbuffers unless told otherwise. */ @@ -1048,8 +1050,6 @@ static int i915_load_modeset_init(struct drm_device *dev) destroy_ringbuffer: i915_gem_cleanup_ringbuffer(dev); -kfree_devname: - kfree(dev->devname); out: return ret; } @@ -1350,6 +1350,7 @@ struct drm_ioctl_desc i915_ioctls[] = { DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, 0), DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, 0), DRM_IOCTL_DEF(DRM_I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, 0), + DRM_IOCTL_DEF(DRM_I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0), }; int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 25065923b8a..c431fa54bbb 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -180,7 +180,8 @@ typedef struct drm_i915_private { int backlight_duty_cycle; /* restore backlight to this value */ bool panel_wants_dither; struct drm_display_mode *panel_fixed_mode; - struct drm_display_mode *vbt_mode; /* if any */ + struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */ + struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */ /* Feature bits from the VBIOS */ unsigned int int_tv_support:1; @@ -283,6 +284,7 @@ typedef struct drm_i915_private { u8 saveAR[21]; u8 saveDACMASK; u8 saveCR[37]; + uint64_t saveFENCE[16]; struct { struct drm_mm gtt_space; @@ -705,13 +707,8 @@ extern void intel_modeset_cleanup(struct drm_device *dev); #define I915_WRITE16(reg, val) writel(val, dev_priv->regs + (reg)) #define I915_READ8(reg) readb(dev_priv->regs + (reg)) #define I915_WRITE8(reg, val) writeb(val, dev_priv->regs + (reg)) -#ifdef writeq #define I915_WRITE64(reg, val) writeq(val, dev_priv->regs + (reg)) -#else -#define I915_WRITE64(reg, val) (writel(val, dev_priv->regs + (reg)), \ - writel(upper_32_bits(val), dev_priv->regs + \ - (reg) + 4)) -#endif +#define I915_READ64(reg) readq(dev_priv->regs + (reg)) #define POSTING_READ(reg) (void)I915_READ(reg) #define I915_VERBOSE 0 @@ -790,7 +787,8 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); (dev)->pci_device == 0x2E22 || \ (dev)->pci_device == 0x2E32) -#define IS_I965GM(dev) ((dev)->pci_device == 0x2A02) +#define IS_I965GM(dev) ((dev)->pci_device == 0x2A02 || \ + (dev)->pci_device == 0x2A12) #define IS_GM45(dev) ((dev)->pci_device == 0x2A42) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index ee896d91c5b..39f5c658ef5 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -349,7 +349,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj, last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE; num_pages = last_data_page - first_data_page + 1; - user_pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL); + user_pages = drm_calloc_large(num_pages, sizeof(struct page *)); if (user_pages == NULL) return -ENOMEM; @@ -429,7 +429,7 @@ fail_put_user_pages: SetPageDirty(user_pages[i]); page_cache_release(user_pages[i]); } - 
kfree(user_pages); + drm_free_large(user_pages); return ret; } @@ -649,7 +649,7 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE; num_pages = last_data_page - first_data_page + 1; - user_pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL); + user_pages = drm_calloc_large(num_pages, sizeof(struct page *)); if (user_pages == NULL) return -ENOMEM; @@ -719,7 +719,7 @@ out_unlock: out_unpin_pages: for (i = 0; i < pinned_pages; i++) page_cache_release(user_pages[i]); - kfree(user_pages); + drm_free_large(user_pages); return ret; } @@ -824,7 +824,7 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE; num_pages = last_data_page - first_data_page + 1; - user_pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL); + user_pages = drm_calloc_large(num_pages, sizeof(struct page *)); if (user_pages == NULL) return -ENOMEM; @@ -902,7 +902,7 @@ fail_unlock: fail_put_user_pages: for (i = 0; i < pinned_pages; i++) page_cache_release(user_pages[i]); - kfree(user_pages); + drm_free_large(user_pages); return ret; } @@ -1145,7 +1145,14 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) mutex_unlock(&dev->struct_mutex); return VM_FAULT_SIGBUS; } - list_add(&obj_priv->list, &dev_priv->mm.inactive_list); + + ret = i915_gem_object_set_to_gtt_domain(obj, write); + if (ret) { + mutex_unlock(&dev->struct_mutex); + return VM_FAULT_SIGBUS; + } + + list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list); } /* Need a new fence register? */ @@ -1375,7 +1382,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, mutex_unlock(&dev->struct_mutex); return ret; } - list_add(&obj_priv->list, &dev_priv->mm.inactive_list); + list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list); } drm_gem_object_unreference(obj); @@ -1408,9 +1415,7 @@ i915_gem_object_put_pages(struct drm_gem_object *obj) } obj_priv->dirty = 0; - drm_free(obj_priv->pages, - page_count * sizeof(struct page *), - DRM_MEM_DRIVER); + drm_free_large(obj_priv->pages); obj_priv->pages = NULL; } @@ -1691,11 +1696,20 @@ static int i915_wait_request(struct drm_device *dev, uint32_t seqno) { drm_i915_private_t *dev_priv = dev->dev_private; + u32 ier; int ret = 0; BUG_ON(seqno == 0); if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) { + ier = I915_READ(IER); + if (!ier) { + DRM_ERROR("something (likely vbetool) disabled " + "interrupts, re-enabling\n"); + i915_driver_irq_preinstall(dev); + i915_driver_irq_postinstall(dev); + } + dev_priv->mm.waiting_gem_seqno = seqno; i915_user_irq_get(dev); ret = wait_event_interruptible(dev_priv->irq_queue, @@ -2015,8 +2029,7 @@ i915_gem_object_get_pages(struct drm_gem_object *obj) */ page_count = obj->size / PAGE_SIZE; BUG_ON(obj_priv->pages != NULL); - obj_priv->pages = drm_calloc(page_count, sizeof(struct page *), - DRM_MEM_DRIVER); + obj_priv->pages = drm_calloc_large(page_count, sizeof(struct page *)); if (obj_priv->pages == NULL) { DRM_ERROR("Faled to allocate page list\n"); obj_priv->pages_refcount--; @@ -2122,8 +2135,10 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg) return; } - pitch_val = (obj_priv->stride / 128) - 1; - WARN_ON(pitch_val & ~0x0000000f); + pitch_val = obj_priv->stride / 128; + pitch_val = ffs(pitch_val) - 1; + WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL); + val = obj_priv->gtt_offset; if (obj_priv->tiling_mode == I915_TILING_Y) val |= 1 << 
I830_FENCE_TILING_Y_SHIFT; @@ -2245,9 +2260,6 @@ try_again: goto try_again; } - BUG_ON(old_obj_priv->active || - (reg->obj->write_domain & I915_GEM_GPU_DOMAINS)); - /* * Zap this virtual mapping so we can set up a fence again * for this object next time we need it. @@ -2415,6 +2427,16 @@ i915_gem_clflush_object(struct drm_gem_object *obj) if (obj_priv->pages == NULL) return; + /* XXX: The 865 in particular appears to be weird in how it handles + * cache flushing. We haven't figured it out, but the + * clflush+agp_chipset_flush doesn't appear to successfully get the + * data visible to the PGU, while wbinvd + agp_chipset_flush does. + */ + if (IS_I865G(obj->dev)) { + wbinvd(); + return; + } + drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE); } @@ -3102,7 +3124,7 @@ i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list, reloc_count += exec_list[i].relocation_count; } - *relocs = drm_calloc(reloc_count, sizeof(**relocs), DRM_MEM_DRIVER); + *relocs = drm_calloc_large(reloc_count, sizeof(**relocs)); if (*relocs == NULL) return -ENOMEM; @@ -3116,8 +3138,7 @@ i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list, exec_list[i].relocation_count * sizeof(**relocs)); if (ret != 0) { - drm_free(*relocs, reloc_count * sizeof(**relocs), - DRM_MEM_DRIVER); + drm_free_large(*relocs); *relocs = NULL; return -EFAULT; } @@ -3156,7 +3177,7 @@ i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object *exec_list, } err: - drm_free(relocs, reloc_count * sizeof(*relocs), DRM_MEM_DRIVER); + drm_free_large(relocs); return ret; } @@ -3189,10 +3210,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, return -EINVAL; } /* Copy in the exec list from userland */ - exec_list = drm_calloc(sizeof(*exec_list), args->buffer_count, - DRM_MEM_DRIVER); - object_list = drm_calloc(sizeof(*object_list), args->buffer_count, - DRM_MEM_DRIVER); + exec_list = drm_calloc_large(sizeof(*exec_list), args->buffer_count); + object_list = drm_calloc_large(sizeof(*object_list), args->buffer_count); if (exec_list == NULL || object_list == NULL) { DRM_ERROR("Failed to allocate exec or object list " "for %d buffers\n", @@ -3453,10 +3472,8 @@ err: } pre_mutex_err: - drm_free(object_list, sizeof(*object_list) * args->buffer_count, - DRM_MEM_DRIVER); - drm_free(exec_list, sizeof(*exec_list) * args->buffer_count, - DRM_MEM_DRIVER); + drm_free_large(object_list); + drm_free_large(exec_list); drm_free(cliprects, sizeof(*cliprects) * args->num_cliprects, DRM_MEM_DRIVER); diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index 52a059354e8..540dd336e6e 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c @@ -213,7 +213,8 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode) if (tiling_mode == I915_TILING_NONE) return true; - if (tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)) + if (!IS_I9XX(dev) || + (tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))) tile_width = 128; else tile_width = 512; @@ -225,11 +226,18 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode) if (stride / 128 > I965_FENCE_MAX_PITCH_VAL) return false; } else if (IS_I9XX(dev)) { - if (stride / tile_width > I830_FENCE_MAX_PITCH_VAL || + uint32_t pitch_val = ffs(stride / tile_width) - 1; + + /* XXX: For Y tiling, FENCE_MAX_PITCH_VAL is actually 6 (8KB) + * instead of 4 (2KB) on 945s. 
+ */ + if (pitch_val > I915_FENCE_MAX_PITCH_VAL || size > (I830_FENCE_MAX_SIZE_VAL << 20)) return false; } else { - if (stride / 128 > I830_FENCE_MAX_PITCH_VAL || + uint32_t pitch_val = ffs(stride / tile_width) - 1; + + if (pitch_val > I830_FENCE_MAX_PITCH_VAL || size > (I830_FENCE_MAX_SIZE_VAL << 19)) return false; } diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 52119473226..375569d01d0 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -190,7 +190,8 @@ #define I830_FENCE_SIZE_BITS(size) ((ffs((size) >> 19) - 1) << 8) #define I830_FENCE_PITCH_SHIFT 4 #define I830_FENCE_REG_VALID (1<<0) -#define I830_FENCE_MAX_PITCH_VAL 0x10 +#define I915_FENCE_MAX_PITCH_VAL 0x10 +#define I830_FENCE_MAX_PITCH_VAL 6 #define I830_FENCE_MAX_SIZE_VAL (1<<8) #define I915_FENCE_START_MASK 0x0ff00000 @@ -526,6 +527,7 @@ #define DPLLA_INPUT_BUFFER_ENABLE (1 << 0) #define D_STATE 0x6104 #define CG_2D_DIS 0x6200 +#define DPCUNIT_CLOCK_GATE_DISABLE (1 << 24) #define CG_3D_DIS 0x6204 /* @@ -1409,9 +1411,25 @@ /* Cursor A & B regs */ #define CURACNTR 0x70080 +/* Old style CUR*CNTR flags (desktop 8xx) */ +#define CURSOR_ENABLE 0x80000000 +#define CURSOR_GAMMA_ENABLE 0x40000000 +#define CURSOR_STRIDE_MASK 0x30000000 +#define CURSOR_FORMAT_SHIFT 24 +#define CURSOR_FORMAT_MASK (0x07 << CURSOR_FORMAT_SHIFT) +#define CURSOR_FORMAT_2C (0x00 << CURSOR_FORMAT_SHIFT) +#define CURSOR_FORMAT_3C (0x01 << CURSOR_FORMAT_SHIFT) +#define CURSOR_FORMAT_4C (0x02 << CURSOR_FORMAT_SHIFT) +#define CURSOR_FORMAT_ARGB (0x04 << CURSOR_FORMAT_SHIFT) +#define CURSOR_FORMAT_XRGB (0x05 << CURSOR_FORMAT_SHIFT) +/* New style CUR*CNTR flags */ +#define CURSOR_MODE 0x27 #define CURSOR_MODE_DISABLE 0x00 #define CURSOR_MODE_64_32B_AX 0x07 #define CURSOR_MODE_64_ARGB_AX ((1 << 5) | CURSOR_MODE_64_32B_AX) +#define MCURSOR_PIPE_SELECT (1 << 28) +#define MCURSOR_PIPE_A 0x00 +#define MCURSOR_PIPE_B (1 << 28) #define MCURSOR_GAMMA_ENABLE (1 << 26) #define CURABASE 0x70084 #define CURAPOS 0x70088 @@ -1419,6 +1437,7 @@ #define CURSOR_POS_SIGN 0x8000 #define CURSOR_X_SHIFT 0 #define CURSOR_Y_SHIFT 16 +#define CURSIZE 0x700a0 #define CURBCNTR 0x700c0 #define CURBBASE 0x700c4 #define CURBPOS 0x700c8 diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index d669cc2b42c..ce8a21344a7 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c @@ -349,6 +349,18 @@ int i915_save_state(struct drm_device *dev) for (i = 0; i < 3; i++) dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2)); + /* Fences */ + if (IS_I965G(dev)) { + for (i = 0; i < 16; i++) + dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8)); + } else { + for (i = 0; i < 8; i++) + dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4)); + + if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) + for (i = 0; i < 8; i++) + dev_priv->saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4)); + } i915_save_vga(dev); return 0; @@ -371,6 +383,18 @@ int i915_restore_state(struct drm_device *dev) /* Display arbitration */ I915_WRITE(DSPARB, dev_priv->saveDSPARB); + /* Fences */ + if (IS_I965G(dev)) { + for (i = 0; i < 16; i++) + I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->saveFENCE[i]); + } else { + for (i = 0; i < 8; i++) + I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]); + if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) + for (i = 0; i < 8; i++) + I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]); + } + /* Pipe & plane A info */ 
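Note on the fence handling: the save/restore added above preserves the same fence registers whose pitch encoding the i915_gem_tiling.c and i915_gem.c hunks earlier in this patch correct; on pre-965 parts the pitch field is stored logarithmically rather than as a raw tile count, hence the switch to ffs(). A rough sketch of the check the new code performs (hypothetical helper, for illustration only, using the I915_FENCE_MAX_PITCH_VAL/I830_FENCE_MAX_PITCH_VAL definitions from i915_reg.h):

/* Sketch only: validate a fence pitch the way the updated tiling checks do.
 * tile_width is 128 or 512 bytes depending on the part and tiling mode. */
static bool sketch_fence_pitch_ok(struct drm_device *dev, int stride,
				  int tile_width)
{
	uint32_t pitch_val = ffs(stride / tile_width) - 1;

	/* 915-class parts accept a larger encoded pitch than 8xx parts. */
	if (IS_I9XX(dev))
		return pitch_val <= I915_FENCE_MAX_PITCH_VAL;

	return pitch_val <= I830_FENCE_MAX_PITCH_VAL;
}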
/* Prime the clock */ if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) { diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index fc28e2bbd54..9d78cff33b2 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -57,9 +57,43 @@ find_section(struct bdb_header *bdb, int section_id) return NULL; } -/* Try to find panel data */ static void -parse_panel_data(struct drm_i915_private *dev_priv, struct bdb_header *bdb) +fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode, + struct lvds_dvo_timing *dvo_timing) +{ + panel_fixed_mode->hdisplay = (dvo_timing->hactive_hi << 8) | + dvo_timing->hactive_lo; + panel_fixed_mode->hsync_start = panel_fixed_mode->hdisplay + + ((dvo_timing->hsync_off_hi << 8) | dvo_timing->hsync_off_lo); + panel_fixed_mode->hsync_end = panel_fixed_mode->hsync_start + + dvo_timing->hsync_pulse_width; + panel_fixed_mode->htotal = panel_fixed_mode->hdisplay + + ((dvo_timing->hblank_hi << 8) | dvo_timing->hblank_lo); + + panel_fixed_mode->vdisplay = (dvo_timing->vactive_hi << 8) | + dvo_timing->vactive_lo; + panel_fixed_mode->vsync_start = panel_fixed_mode->vdisplay + + dvo_timing->vsync_off; + panel_fixed_mode->vsync_end = panel_fixed_mode->vsync_start + + dvo_timing->vsync_pulse_width; + panel_fixed_mode->vtotal = panel_fixed_mode->vdisplay + + ((dvo_timing->vblank_hi << 8) | dvo_timing->vblank_lo); + panel_fixed_mode->clock = dvo_timing->clock * 10; + panel_fixed_mode->type = DRM_MODE_TYPE_PREFERRED; + + /* Some VBTs have bogus h/vtotal values */ + if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal) + panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1; + if (panel_fixed_mode->vsync_end > panel_fixed_mode->vtotal) + panel_fixed_mode->vtotal = panel_fixed_mode->vsync_end + 1; + + drm_mode_set_name(panel_fixed_mode); +} + +/* Try to find integrated panel data */ +static void +parse_lfp_panel_data(struct drm_i915_private *dev_priv, + struct bdb_header *bdb) { struct bdb_lvds_options *lvds_options; struct bdb_lvds_lfp_data *lvds_lfp_data; @@ -91,35 +125,9 @@ parse_panel_data(struct drm_i915_private *dev_priv, struct bdb_header *bdb) panel_fixed_mode = drm_calloc(1, sizeof(*panel_fixed_mode), DRM_MEM_DRIVER); - panel_fixed_mode->hdisplay = (dvo_timing->hactive_hi << 8) | - dvo_timing->hactive_lo; - panel_fixed_mode->hsync_start = panel_fixed_mode->hdisplay + - ((dvo_timing->hsync_off_hi << 8) | dvo_timing->hsync_off_lo); - panel_fixed_mode->hsync_end = panel_fixed_mode->hsync_start + - dvo_timing->hsync_pulse_width; - panel_fixed_mode->htotal = panel_fixed_mode->hdisplay + - ((dvo_timing->hblank_hi << 8) | dvo_timing->hblank_lo); + fill_detail_timing_data(panel_fixed_mode, dvo_timing); - panel_fixed_mode->vdisplay = (dvo_timing->vactive_hi << 8) | - dvo_timing->vactive_lo; - panel_fixed_mode->vsync_start = panel_fixed_mode->vdisplay + - dvo_timing->vsync_off; - panel_fixed_mode->vsync_end = panel_fixed_mode->vsync_start + - dvo_timing->vsync_pulse_width; - panel_fixed_mode->vtotal = panel_fixed_mode->vdisplay + - ((dvo_timing->vblank_hi << 8) | dvo_timing->vblank_lo); - panel_fixed_mode->clock = dvo_timing->clock * 10; - panel_fixed_mode->type = DRM_MODE_TYPE_PREFERRED; - - /* Some VBTs have bogus h/vtotal values */ - if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal) - panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1; - if (panel_fixed_mode->vsync_end > panel_fixed_mode->vtotal) - panel_fixed_mode->vtotal = panel_fixed_mode->vsync_end + 1; - - 
drm_mode_set_name(panel_fixed_mode); - - dev_priv->vbt_mode = panel_fixed_mode; + dev_priv->lfp_lvds_vbt_mode = panel_fixed_mode; DRM_DEBUG("Found panel mode in BIOS VBT tables:\n"); drm_mode_debug_printmodeline(panel_fixed_mode); @@ -127,6 +135,39 @@ parse_panel_data(struct drm_i915_private *dev_priv, struct bdb_header *bdb) return; } +/* Try to find sdvo panel data */ +static void +parse_sdvo_panel_data(struct drm_i915_private *dev_priv, + struct bdb_header *bdb) +{ + struct bdb_sdvo_lvds_options *sdvo_lvds_options; + struct lvds_dvo_timing *dvo_timing; + struct drm_display_mode *panel_fixed_mode; + + dev_priv->sdvo_lvds_vbt_mode = NULL; + + sdvo_lvds_options = find_section(bdb, BDB_SDVO_LVDS_OPTIONS); + if (!sdvo_lvds_options) + return; + + dvo_timing = find_section(bdb, BDB_SDVO_PANEL_DTDS); + if (!dvo_timing) + return; + + panel_fixed_mode = drm_calloc(1, sizeof(*panel_fixed_mode), + DRM_MEM_DRIVER); + + if (!panel_fixed_mode) + return; + + fill_detail_timing_data(panel_fixed_mode, + dvo_timing + sdvo_lvds_options->panel_type); + + dev_priv->sdvo_lvds_vbt_mode = panel_fixed_mode; + + return; +} + static void parse_general_features(struct drm_i915_private *dev_priv, struct bdb_header *bdb) @@ -199,7 +240,8 @@ intel_init_bios(struct drm_device *dev) /* Grab useful general definitions */ parse_general_features(dev_priv, bdb); - parse_panel_data(dev_priv, bdb); + parse_lfp_panel_data(dev_priv, bdb); + parse_sdvo_panel_data(dev_priv, bdb); pci_unmap_rom(pdev, bios); diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h index de621aad85b..8ca2cde1580 100644 --- a/drivers/gpu/drm/i915/intel_bios.h +++ b/drivers/gpu/drm/i915/intel_bios.h @@ -279,6 +279,23 @@ struct vch_bdb_22 { struct vch_panel_data panels[16]; } __attribute__((packed)); +struct bdb_sdvo_lvds_options { + u8 panel_backlight; + u8 h40_set_panel_type; + u8 panel_type; + u8 ssc_clk_freq; + u16 als_low_trip; + u16 als_high_trip; + u8 sclalarcoeff_tab_row_num; + u8 sclalarcoeff_tab_row_size; + u8 coefficient[8]; + u8 panel_misc_bits_1; + u8 panel_misc_bits_2; + u8 panel_misc_bits_3; + u8 panel_misc_bits_4; +} __attribute__((packed)); + + bool intel_init_bios(struct drm_device *dev); /* diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 9bdd959260a..79acc4f4c1f 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -161,7 +161,7 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector) hotplug_en &= CRT_FORCE_HOTPLUG_MASK; hotplug_en |= CRT_HOTPLUG_FORCE_DETECT; - if (IS_GM45(dev)) + if (IS_G4X(dev)) hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; @@ -198,9 +198,142 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector) return intel_ddc_probe(intel_output); } +static enum drm_connector_status +intel_crt_load_detect(struct drm_crtc *crtc, struct intel_output *intel_output) +{ + struct drm_encoder *encoder = &intel_output->enc; + struct drm_device *dev = encoder->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + uint32_t pipe = intel_crtc->pipe; + uint32_t save_bclrpat; + uint32_t save_vtotal; + uint32_t vtotal, vactive; + uint32_t vsample; + uint32_t vblank, vblank_start, vblank_end; + uint32_t dsl; + uint32_t bclrpat_reg; + uint32_t vtotal_reg; + uint32_t vblank_reg; + uint32_t vsync_reg; + uint32_t pipeconf_reg; + uint32_t pipe_dsl_reg; + uint8_t st00; + enum drm_connector_status status; + + if 
(pipe == 0) { + bclrpat_reg = BCLRPAT_A; + vtotal_reg = VTOTAL_A; + vblank_reg = VBLANK_A; + vsync_reg = VSYNC_A; + pipeconf_reg = PIPEACONF; + pipe_dsl_reg = PIPEADSL; + } else { + bclrpat_reg = BCLRPAT_B; + vtotal_reg = VTOTAL_B; + vblank_reg = VBLANK_B; + vsync_reg = VSYNC_B; + pipeconf_reg = PIPEBCONF; + pipe_dsl_reg = PIPEBDSL; + } + + save_bclrpat = I915_READ(bclrpat_reg); + save_vtotal = I915_READ(vtotal_reg); + vblank = I915_READ(vblank_reg); + + vtotal = ((save_vtotal >> 16) & 0xfff) + 1; + vactive = (save_vtotal & 0x7ff) + 1; + + vblank_start = (vblank & 0xfff) + 1; + vblank_end = ((vblank >> 16) & 0xfff) + 1; + + /* Set the border color to purple. */ + I915_WRITE(bclrpat_reg, 0x500050); + + if (IS_I9XX(dev)) { + uint32_t pipeconf = I915_READ(pipeconf_reg); + I915_WRITE(pipeconf_reg, pipeconf | PIPECONF_FORCE_BORDER); + /* Wait for next Vblank to substitue + * border color for Color info */ + intel_wait_for_vblank(dev); + st00 = I915_READ8(VGA_MSR_WRITE); + status = ((st00 & (1 << 4)) != 0) ? + connector_status_connected : + connector_status_disconnected; + + I915_WRITE(pipeconf_reg, pipeconf); + } else { + bool restore_vblank = false; + int count, detect; + + /* + * If there isn't any border, add some. + * Yes, this will flicker + */ + if (vblank_start <= vactive && vblank_end >= vtotal) { + uint32_t vsync = I915_READ(vsync_reg); + uint32_t vsync_start = (vsync & 0xffff) + 1; + + vblank_start = vsync_start; + I915_WRITE(vblank_reg, + (vblank_start - 1) | + ((vblank_end - 1) << 16)); + restore_vblank = true; + } + /* sample in the vertical border, selecting the larger one */ + if (vblank_start - vactive >= vtotal - vblank_end) + vsample = (vblank_start + vactive) >> 1; + else + vsample = (vtotal + vblank_end) >> 1; + + /* + * Wait for the border to be displayed + */ + while (I915_READ(pipe_dsl_reg) >= vactive) + ; + while ((dsl = I915_READ(pipe_dsl_reg)) <= vsample) + ; + /* + * Watch ST00 for an entire scanline + */ + detect = 0; + count = 0; + do { + count++; + /* Read the ST00 VGA status register */ + st00 = I915_READ8(VGA_MSR_WRITE); + if (st00 & (1 << 4)) + detect++; + } while ((I915_READ(pipe_dsl_reg) == dsl)); + + /* restore vblank if necessary */ + if (restore_vblank) + I915_WRITE(vblank_reg, vblank); + /* + * If more than 3/4 of the scanline detected a monitor, + * then it is assumed to be present. This works even on i830, + * where there isn't any way to force the border color across + * the screen + */ + status = detect * 4 > count * 3 ? 
+ connector_status_connected : + connector_status_disconnected; + } + + /* Restore previous settings */ + I915_WRITE(bclrpat_reg, save_bclrpat); + + return status; +} + static enum drm_connector_status intel_crt_detect(struct drm_connector *connector) { struct drm_device *dev = connector->dev; + struct intel_output *intel_output = to_intel_output(connector); + struct drm_encoder *encoder = &intel_output->enc; + struct drm_crtc *crtc; + int dpms_mode; + enum drm_connector_status status; if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev)) { if (intel_crt_detect_hotplug(connector)) @@ -212,8 +345,20 @@ static enum drm_connector_status intel_crt_detect(struct drm_connector *connecto if (intel_crt_detect_ddc(connector)) return connector_status_connected; - /* TODO use load detect */ - return connector_status_unknown; + /* for pre-945g platforms use load detect */ + if (encoder->crtc && encoder->crtc->enabled) { + status = intel_crt_load_detect(encoder->crtc, intel_output); + } else { + crtc = intel_get_load_detect_pipe(intel_output, + NULL, &dpms_mode); + if (crtc) { + status = intel_crt_load_detect(crtc, intel_output); + intel_release_load_detect_pipe(intel_output, dpms_mode); + } else + status = connector_status_unknown; + } + + return status; } static void intel_crt_destroy(struct drm_connector *connector) @@ -236,11 +381,6 @@ static int intel_crt_set_property(struct drm_connector *connector, struct drm_property *property, uint64_t value) { - struct drm_device *dev = connector->dev; - - if (property == dev->mode_config.dpms_property && connector->encoder) - intel_crt_dpms(connector->encoder, (uint32_t)(value & 0xf)); - return 0; } @@ -257,6 +397,7 @@ static const struct drm_encoder_helper_funcs intel_crt_helper_funcs = { }; static const struct drm_connector_funcs intel_crt_connector_funcs = { + .dpms = drm_helper_connector_dpms, .detect = intel_crt_detect, .fill_modes = drm_helper_probe_single_connector_modes, .destroy = intel_crt_destroy, diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index bdcda36953b..c9d6f10ba92 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -1357,7 +1357,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, int pipe = intel_crtc->pipe; uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR; uint32_t base = (pipe == 0) ? 
CURABASE : CURBBASE; - uint32_t temp; + uint32_t temp = I915_READ(control); size_t addr; int ret; @@ -1366,7 +1366,12 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, /* if we want to turn off the cursor ignore width and height */ if (!handle) { DRM_DEBUG("cursor off\n"); - temp = CURSOR_MODE_DISABLE; + if (IS_MOBILE(dev) || IS_I9XX(dev)) { + temp &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE); + temp |= CURSOR_MODE_DISABLE; + } else { + temp &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE); + } addr = 0; bo = NULL; mutex_lock(&dev->struct_mutex); @@ -1409,10 +1414,19 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, addr = obj_priv->phys_obj->handle->busaddr; } - temp = 0; - /* set the pipe for the cursor */ - temp |= (pipe << 28); - temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE; + if (!IS_I9XX(dev)) + I915_WRITE(CURSIZE, (height << 12) | width); + + /* Hooray for CUR*CNTR differences */ + if (IS_MOBILE(dev) || IS_I9XX(dev)) { + temp &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT); + temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE; + temp |= (pipe << 28); /* Connect to correct pipe */ + } else { + temp &= ~(CURSOR_FORMAT_MASK); + temp |= CURSOR_ENABLE; + temp |= CURSOR_FORMAT_ARGB | CURSOR_GAMMA_ENABLE; + } finish: I915_WRITE(control, temp); @@ -1804,6 +1818,37 @@ static void intel_crtc_init(struct drm_device *dev, int pipe) } } +int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data; + struct drm_crtc *crtc = NULL; + int pipe = -1; + + if (!dev_priv) { + DRM_ERROR("called with no initialization\n"); + return -EINVAL; + } + + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + if (crtc->base.id == pipe_from_crtc_id->crtc_id) { + pipe = intel_crtc->pipe; + break; + } + } + + if (pipe == -1) { + DRM_ERROR("no such CRTC id\n"); + return -EINVAL; + } + + pipe_from_crtc_id->pipe = pipe; + + return 0; +} + struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe) { struct drm_crtc *crtc = NULL; diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 957daef8edf..cd4b9c5f715 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -109,7 +109,7 @@ struct intel_i2c_chan *intel_i2c_create(struct drm_device *dev, const u32 reg, void intel_i2c_destroy(struct intel_i2c_chan *chan); int intel_ddc_get_modes(struct intel_output *intel_output); extern bool intel_ddc_probe(struct intel_output *intel_output); - +void intel_i2c_quirk_set(struct drm_device *dev, bool enable); extern void intel_crt_init(struct drm_device *dev); extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg); extern bool intel_sdvo_init(struct drm_device *dev, int output_device); @@ -125,6 +125,8 @@ extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector); extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, struct drm_crtc *crtc); +int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, + struct drm_file *file_priv); extern void intel_wait_for_vblank(struct drm_device *dev); extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe); extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_output *intel_output, diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c index 
8b8d6e65cd3..1ee3007d6ec 100644 --- a/drivers/gpu/drm/i915/intel_dvo.c +++ b/drivers/gpu/drm/i915/intel_dvo.c @@ -316,6 +316,7 @@ static const struct drm_encoder_helper_funcs intel_dvo_helper_funcs = { }; static const struct drm_connector_funcs intel_dvo_connector_funcs = { + .dpms = drm_helper_connector_dpms, .save = intel_dvo_save, .restore = intel_dvo_restore, .detect = intel_dvo_detect, diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c index 3e094beecb9..e4652dcdd9b 100644 --- a/drivers/gpu/drm/i915/intel_fb.c +++ b/drivers/gpu/drm/i915/intel_fb.c @@ -864,7 +864,7 @@ static void intelfb_sysrq(int dummy1, struct tty_struct *dummy3) static struct sysrq_key_op sysrq_intelfb_restore_op = { .handler = intelfb_sysrq, - .help_msg = "force-fb(G)", + .help_msg = "force-fb(V)", .action_msg = "Restore framebuffer console", }; @@ -898,7 +898,7 @@ int intelfb_probe(struct drm_device *dev) ret = intelfb_single_fb_probe(dev); } - register_sysrq_key('g', &sysrq_intelfb_restore_op); + register_sysrq_key('v', &sysrq_intelfb_restore_op); return ret; } diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 55037422538..7d6bdd70532 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -155,11 +155,18 @@ intel_hdmi_detect(struct drm_connector *connector) temp = I915_READ(PORT_HOTPLUG_EN); - I915_WRITE(PORT_HOTPLUG_EN, - temp | - HDMIB_HOTPLUG_INT_EN | - HDMIC_HOTPLUG_INT_EN | - HDMID_HOTPLUG_INT_EN); + switch (hdmi_priv->sdvox_reg) { + case SDVOB: + temp |= HDMIB_HOTPLUG_INT_EN; + break; + case SDVOC: + temp |= HDMIC_HOTPLUG_INT_EN; + break; + default: + return connector_status_unknown; + } + + I915_WRITE(PORT_HOTPLUG_EN, temp); POSTING_READ(PORT_HOTPLUG_EN); @@ -212,6 +219,7 @@ static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = { }; static const struct drm_connector_funcs intel_hdmi_connector_funcs = { + .dpms = drm_helper_connector_dpms, .save = intel_hdmi_save, .restore = intel_hdmi_restore, .detect = intel_hdmi_detect, diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index 5ee9d4c2575..f7061f68d05 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c @@ -34,6 +34,21 @@ #include "i915_drm.h" #include "i915_drv.h" +void intel_i2c_quirk_set(struct drm_device *dev, bool enable) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + /* When using bit bashing for I2C, this bit needs to be set to 1 */ + if (!IS_IGD(dev)) + return; + if (enable) + I915_WRITE(CG_2D_DIS, + I915_READ(CG_2D_DIS) | DPCUNIT_CLOCK_GATE_DISABLE); + else + I915_WRITE(CG_2D_DIS, + I915_READ(CG_2D_DIS) & (~DPCUNIT_CLOCK_GATE_DISABLE)); +} + /* * Intel GPIO access functions */ @@ -153,8 +168,10 @@ struct intel_i2c_chan *intel_i2c_create(struct drm_device *dev, const u32 reg, goto out_free; /* JJJ: raise SCL and SDA? 
*/ + intel_i2c_quirk_set(dev, true); set_data(chan, 1); set_clock(chan, 1); + intel_i2c_quirk_set(dev, false); udelay(20); return chan; diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 6619f26e46a..53cccfa58b9 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -343,11 +343,6 @@ static int intel_lvds_set_property(struct drm_connector *connector, struct drm_property *property, uint64_t value) { - struct drm_device *dev = connector->dev; - - if (property == dev->mode_config.dpms_property && connector->encoder) - intel_lvds_dpms(connector->encoder, (uint32_t)(value & 0xf)); - return 0; } @@ -366,6 +361,7 @@ static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs }; static const struct drm_connector_funcs intel_lvds_connector_funcs = { + .dpms = drm_helper_connector_dpms, .save = intel_lvds_save, .restore = intel_lvds_restore, .detect = intel_lvds_detect, @@ -384,7 +380,51 @@ static const struct drm_encoder_funcs intel_lvds_enc_funcs = { .destroy = intel_lvds_enc_destroy, }; +static int __init intel_no_lvds_dmi_callback(const struct dmi_system_id *id) +{ + DRM_DEBUG("Skipping LVDS initialization for %s\n", id->ident); + return 1; +} +/* These systems claim to have LVDS, but really don't */ +static const struct dmi_system_id intel_no_lvds[] = { + { + .callback = intel_no_lvds_dmi_callback, + .ident = "Apple Mac Mini (Core series)", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Macmini1,1"), + }, + }, + { + .callback = intel_no_lvds_dmi_callback, + .ident = "Apple Mac Mini (Core 2 series)", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Macmini2,1"), + }, + }, + { + .callback = intel_no_lvds_dmi_callback, + .ident = "MSI IM-945GSE-A", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "MSI"), + DMI_MATCH(DMI_PRODUCT_NAME, "A9830IMS"), + }, + }, + { + .callback = intel_no_lvds_dmi_callback, + .ident = "Dell Studio Hybrid", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Studio Hybrid 140g"), + }, + }, + + /* FIXME: add a check for the Aopen Mini PC */ + + { } /* terminating entry */ +}; /** * intel_lvds_init - setup LVDS connectors on this device @@ -404,15 +444,9 @@ void intel_lvds_init(struct drm_device *dev) u32 lvds; int pipe; - /* Blacklist machines that we know falsely report LVDS. */ - /* FIXME: add a check for the Aopen Mini PC */ - - /* Apple Mac Mini Core Duo and Mac Mini Core 2 Duo */ - if(dmi_match(DMI_PRODUCT_NAME, "Macmini1,1") || - dmi_match(DMI_PRODUCT_NAME, "Macmini2,1")) { - DRM_DEBUG("Skipping LVDS initialization for Apple Mac Mini\n"); + /* Skip init on machines we know falsely report LVDS */ + if (dmi_check_system(intel_no_lvds)) return; - } intel_output = kzalloc(sizeof(struct intel_output), GFP_KERNEL); if (!intel_output) { @@ -473,10 +507,10 @@ void intel_lvds_init(struct drm_device *dev) } /* Failed to get EDID, what about VBT? 
*/ - if (dev_priv->vbt_mode) { + if (dev_priv->lfp_lvds_vbt_mode) { mutex_lock(&dev->mode_config.mutex); dev_priv->panel_fixed_mode = - drm_mode_duplicate(dev, dev_priv->vbt_mode); + drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode); mutex_unlock(&dev->mode_config.mutex); if (dev_priv->panel_fixed_mode) { dev_priv->panel_fixed_mode->type |= diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c index 07d7ec97616..e0910fefce8 100644 --- a/drivers/gpu/drm/i915/intel_modes.c +++ b/drivers/gpu/drm/i915/intel_modes.c @@ -27,6 +27,7 @@ #include #include "drmP.h" #include "intel_drv.h" +#include "i915_drv.h" /** * intel_ddc_probe @@ -52,7 +53,10 @@ bool intel_ddc_probe(struct intel_output *intel_output) } }; + intel_i2c_quirk_set(intel_output->ddc_bus->drm_dev, true); ret = i2c_transfer(&intel_output->ddc_bus->adapter, msgs, 2); + intel_i2c_quirk_set(intel_output->ddc_bus->drm_dev, false); + if (ret == 2) return true; @@ -70,8 +74,10 @@ int intel_ddc_get_modes(struct intel_output *intel_output) struct edid *edid; int ret = 0; + intel_i2c_quirk_set(intel_output->ddc_bus->drm_dev, true); edid = drm_get_edid(&intel_output->base, &intel_output->ddc_bus->adapter); + intel_i2c_quirk_set(intel_output->ddc_bus->drm_dev, false); if (edid) { drm_mode_connector_update_edid_property(&intel_output->base, edid); diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 9913651c1e1..3093b4d4a4d 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -69,6 +69,10 @@ struct intel_sdvo_priv { * This is set if we treat the device as HDMI, instead of DVI. */ bool is_hdmi; + /** + * This is set if we detect output of sdvo device as LVDS. + */ + bool is_lvds; /** * Returned SDTV resolutions allowed for the current format, if the @@ -1398,10 +1402,8 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect static void intel_sdvo_get_ddc_modes(struct drm_connector *connector) { struct intel_output *intel_output = to_intel_output(connector); - struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; /* set the bus switch and get the modes */ - intel_sdvo_set_control_bus_switch(intel_output, sdvo_priv->ddc_bus); intel_ddc_get_modes(intel_output); #if 0 @@ -1543,6 +1545,37 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector) } } +static void intel_sdvo_get_lvds_modes(struct drm_connector *connector) +{ + struct intel_output *intel_output = to_intel_output(connector); + struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; + struct drm_i915_private *dev_priv = connector->dev->dev_private; + + /* + * Attempt to get the mode list from DDC. + * Assume that the preferred modes are + * arranged in priority order. 
+ */ + /* set the bus switch and get the modes */ + intel_sdvo_set_control_bus_switch(intel_output, sdvo_priv->ddc_bus); + intel_ddc_get_modes(intel_output); + if (list_empty(&connector->probed_modes) == false) + return; + + /* Fetch modes from VBT */ + if (dev_priv->sdvo_lvds_vbt_mode != NULL) { + struct drm_display_mode *newmode; + newmode = drm_mode_duplicate(connector->dev, + dev_priv->sdvo_lvds_vbt_mode); + if (newmode != NULL) { + /* Guarantee the mode is preferred */ + newmode->type = (DRM_MODE_TYPE_PREFERRED | + DRM_MODE_TYPE_DRIVER); + drm_mode_probed_add(connector, newmode); + } + } +} + static int intel_sdvo_get_modes(struct drm_connector *connector) { struct intel_output *output = to_intel_output(connector); @@ -1550,6 +1583,8 @@ static int intel_sdvo_get_modes(struct drm_connector *connector) if (sdvo_priv->is_tv) intel_sdvo_get_tv_modes(connector); + else if (sdvo_priv->is_lvds == true) + intel_sdvo_get_lvds_modes(connector); else intel_sdvo_get_ddc_modes(connector); @@ -1564,6 +1599,9 @@ static void intel_sdvo_destroy(struct drm_connector *connector) if (intel_output->i2c_bus) intel_i2c_destroy(intel_output->i2c_bus); + if (intel_output->ddc_bus) + intel_i2c_destroy(intel_output->ddc_bus); + drm_sysfs_connector_remove(connector); drm_connector_cleanup(connector); kfree(intel_output); @@ -1578,6 +1616,7 @@ static const struct drm_encoder_helper_funcs intel_sdvo_helper_funcs = { }; static const struct drm_connector_funcs intel_sdvo_connector_funcs = { + .dpms = drm_helper_connector_dpms, .save = intel_sdvo_save, .restore = intel_sdvo_restore, .detect = intel_sdvo_detect, @@ -1660,12 +1699,56 @@ intel_sdvo_get_digital_encoding_mode(struct intel_output *output) return true; } +static struct intel_output * +intel_sdvo_chan_to_intel_output(struct intel_i2c_chan *chan) +{ + struct drm_device *dev = chan->drm_dev; + struct drm_connector *connector; + struct intel_output *intel_output = NULL; + + list_for_each_entry(connector, + &dev->mode_config.connector_list, head) { + if (to_intel_output(connector)->ddc_bus == chan) { + intel_output = to_intel_output(connector); + break; + } + } + return intel_output; +} + +static int intel_sdvo_master_xfer(struct i2c_adapter *i2c_adap, + struct i2c_msg msgs[], int num) +{ + struct intel_output *intel_output; + struct intel_sdvo_priv *sdvo_priv; + struct i2c_algo_bit_data *algo_data; + struct i2c_algorithm *algo; + + algo_data = (struct i2c_algo_bit_data *)i2c_adap->algo_data; + intel_output = + intel_sdvo_chan_to_intel_output( + (struct intel_i2c_chan *)(algo_data->data)); + if (intel_output == NULL) + return -EINVAL; + + sdvo_priv = intel_output->dev_priv; + algo = (struct i2c_algorithm *)intel_output->i2c_bus->adapter.algo; + + intel_sdvo_set_control_bus_switch(intel_output, sdvo_priv->ddc_bus); + return algo->master_xfer(i2c_adap, msgs, num); +} + +static struct i2c_algorithm intel_sdvo_i2c_bit_algo = { + .master_xfer = intel_sdvo_master_xfer, +}; + bool intel_sdvo_init(struct drm_device *dev, int output_device) { struct drm_connector *connector; struct intel_output *intel_output; struct intel_sdvo_priv *sdvo_priv; struct intel_i2c_chan *i2cbus = NULL; + struct intel_i2c_chan *ddcbus = NULL; int connector_type; u8 ch[0x40]; int i; @@ -1676,17 +1759,9 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device) return false; } - connector = &intel_output->base; - - drm_connector_init(dev, connector, &intel_sdvo_connector_funcs, - DRM_MODE_CONNECTOR_Unknown); - drm_connector_helper_add(connector, &intel_sdvo_connector_helper_funcs); 
sdvo_priv = (struct intel_sdvo_priv *)(intel_output + 1); intel_output->type = INTEL_OUTPUT_SDVO; - connector->interlace_allowed = 0; - connector->doublescan_allowed = 0; - /* setup the DDC bus. */ if (output_device == SDVOB) i2cbus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOB"); @@ -1694,7 +1769,7 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device) i2cbus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOC"); if (!i2cbus) - goto err_connector; + goto err_inteloutput; sdvo_priv->i2c_bus = i2cbus; @@ -1710,7 +1785,6 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device) intel_output->i2c_bus = i2cbus; intel_output->dev_priv = sdvo_priv; - /* Read the regs to test if we can talk to the device */ for (i = 0; i < 0x40; i++) { if (!intel_sdvo_read_byte(intel_output, i, &ch[i])) { @@ -1720,6 +1794,22 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device) } } + /* setup the DDC bus. */ + if (output_device == SDVOB) + ddcbus = intel_i2c_create(dev, GPIOE, "SDVOB DDC BUS"); + else + ddcbus = intel_i2c_create(dev, GPIOE, "SDVOC DDC BUS"); + + if (ddcbus == NULL) + goto err_i2c; + + intel_sdvo_i2c_bit_algo.functionality = + intel_output->i2c_bus->adapter.algo->functionality; + ddcbus->adapter.algo = &intel_sdvo_i2c_bit_algo; + intel_output->ddc_bus = ddcbus; + + /* In default case sdvo lvds is false */ + sdvo_priv->is_lvds = false; intel_sdvo_get_capabilities(intel_output, &sdvo_priv->caps); if (sdvo_priv->caps.output_flags & @@ -1729,7 +1819,6 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device) else sdvo_priv->controlled_output = SDVO_OUTPUT_TMDS1; - connector->display_info.subpixel_order = SubPixelHorizontalRGB; encoder_type = DRM_MODE_ENCODER_TMDS; connector_type = DRM_MODE_CONNECTOR_DVID; @@ -1747,7 +1836,6 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device) else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_SVID0) { sdvo_priv->controlled_output = SDVO_OUTPUT_SVID0; - connector->display_info.subpixel_order = SubPixelHorizontalRGB; encoder_type = DRM_MODE_ENCODER_TVDAC; connector_type = DRM_MODE_CONNECTOR_SVIDEO; sdvo_priv->is_tv = true; @@ -1756,30 +1844,28 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device) else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_RGB0) { sdvo_priv->controlled_output = SDVO_OUTPUT_RGB0; - connector->display_info.subpixel_order = SubPixelHorizontalRGB; encoder_type = DRM_MODE_ENCODER_DAC; connector_type = DRM_MODE_CONNECTOR_VGA; } else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_RGB1) { sdvo_priv->controlled_output = SDVO_OUTPUT_RGB1; - connector->display_info.subpixel_order = SubPixelHorizontalRGB; encoder_type = DRM_MODE_ENCODER_DAC; connector_type = DRM_MODE_CONNECTOR_VGA; } else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_LVDS0) { sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0; - connector->display_info.subpixel_order = SubPixelHorizontalRGB; encoder_type = DRM_MODE_ENCODER_LVDS; connector_type = DRM_MODE_CONNECTOR_LVDS; + sdvo_priv->is_lvds = true; } else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_LVDS1) { sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS1; - connector->display_info.subpixel_order = SubPixelHorizontalRGB; encoder_type = DRM_MODE_ENCODER_LVDS; connector_type = DRM_MODE_CONNECTOR_LVDS; + sdvo_priv->is_lvds = true; } else { @@ -1795,9 +1881,16 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device) goto err_i2c; } + connector = &intel_output->base; + drm_connector_init(dev, connector, &intel_sdvo_connector_funcs, + connector_type); + 
drm_connector_helper_add(connector, &intel_sdvo_connector_helper_funcs); + connector->interlace_allowed = 0; + connector->doublescan_allowed = 0; + connector->display_info.subpixel_order = SubPixelHorizontalRGB; + drm_encoder_init(dev, &intel_output->enc, &intel_sdvo_enc_funcs, encoder_type); drm_encoder_helper_add(&intel_output->enc, &intel_sdvo_helper_funcs); - connector->connector_type = connector_type; drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc); drm_sysfs_connector_add(connector); @@ -1829,14 +1922,13 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device) sdvo_priv->caps.output_flags & (SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N'); - intel_output->ddc_bus = i2cbus; - return true; err_i2c: + if (ddcbus != NULL) + intel_i2c_destroy(intel_output->ddc_bus); intel_i2c_destroy(intel_output->i2c_bus); -err_connector: - drm_connector_cleanup(connector); +err_inteloutput: kfree(intel_output); return false; diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index d2c32983242..98ac0546b7b 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c @@ -1626,6 +1626,7 @@ static const struct drm_encoder_helper_funcs intel_tv_helper_funcs = { }; static const struct drm_connector_funcs intel_tv_connector_funcs = { + .dpms = drm_helper_connector_dpms, .save = intel_tv_save, .restore = intel_tv_restore, .detect = intel_tv_detect, diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c index 77a7a4d8465..aff90bb9648 100644 --- a/drivers/gpu/drm/radeon/radeon_cp.c +++ b/drivers/gpu/drm/radeon/radeon_cp.c @@ -2185,9 +2185,9 @@ void radeon_commit_ring(drm_radeon_private_t *dev_priv) /* check if the ring is padded out to 16-dword alignment */ - tail_aligned = dev_priv->ring.tail & 0xf; + tail_aligned = dev_priv->ring.tail & (RADEON_RING_ALIGN-1); if (tail_aligned) { - int num_p2 = 16 - tail_aligned; + int num_p2 = RADEON_RING_ALIGN - tail_aligned; ring = dev_priv->ring.start; /* pad with some CP_PACKET2 */ diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h index 8071d965f14..0c6bfc1de15 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.h +++ b/drivers/gpu/drm/radeon/radeon_drv.h @@ -1964,11 +1964,14 @@ do { \ #define RING_LOCALS int write, _nr, _align_nr; unsigned int mask; u32 *ring; +#define RADEON_RING_ALIGN 16 + #define BEGIN_RING( n ) do { \ if ( RADEON_VERBOSE ) { \ DRM_INFO( "BEGIN_RING( %d )\n", (n)); \ } \ - _align_nr = (n + 0xf) & ~0xf; \ + _align_nr = RADEON_RING_ALIGN - ((dev_priv->ring.tail + n) & (RADEON_RING_ALIGN-1)); \ + _align_nr += n; \ if (dev_priv->ring.space <= (_align_nr * sizeof(u32))) { \ COMMIT_RING(); \ radeon_wait_ring( dev_priv, _align_nr * sizeof(u32)); \ diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index aa1b995dd03..4d5ee2bbc62 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -113,6 +113,11 @@ #define USB_VENDOR_ID_BERKSHIRE 0x0c98 #define USB_DEVICE_ID_BERKSHIRE_PCWD 0x1140 +#define USB_VENDOR_ID_CH 0x068e +#define USB_DEVICE_ID_CH_PRO_PEDALS 0x00f2 +#define USB_DEVICE_ID_CH_COMBATSTICK 0x00f4 +#define USB_DEVICE_ID_CH_FLIGHT_SIM_YOKE 0x00ff + #define USB_VENDOR_ID_CHERRY 0x046a #define USB_DEVICE_ID_CHERRY_CYMOTION 0x0023 diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c index 900ce18dd54..ac8049b5f1e 100644 --- a/drivers/hid/usbhid/hid-core.c +++ b/drivers/hid/usbhid/hid-core.c @@ -898,7 +898,7 @@ static int usbhid_parse(struct hid_device *hid) goto err; } - 
hid->quirks = quirks; + hid->quirks |= quirks; return 0; err: diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c index 4391717d251..d8f7423f363 100644 --- a/drivers/hid/usbhid/hid-quirks.c +++ b/drivers/hid/usbhid/hid-quirks.c @@ -50,6 +50,9 @@ static const struct hid_blacklist { { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_2PORTKVM, HID_QUIRK_NOGET }, { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVM, HID_QUIRK_NOGET }, { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVMC, HID_QUIRK_NOGET }, + { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_COMBATSTICK, HID_QUIRK_NOGET }, + { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FLIGHT_SIM_YOKE, HID_QUIRK_NOGET }, + { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_PRO_PEDALS, HID_QUIRK_NOGET }, { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET }, { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET }, { USB_VENDOR_ID_SUN, USB_DEVICE_ID_RARITAN_KVM_DONGLE, HID_QUIRK_NOGET }, diff --git a/drivers/hwmon/lm78.c b/drivers/hwmon/lm78.c index b5e3b285169..a1787fdf5b9 100644 --- a/drivers/hwmon/lm78.c +++ b/drivers/hwmon/lm78.c @@ -182,7 +182,7 @@ static struct platform_driver lm78_isa_driver = { .name = "lm78", }, .probe = lm78_isa_probe, - .remove = lm78_isa_remove, + .remove = __devexit_p(lm78_isa_remove), }; diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index a48c8aee021..f1c6ca7e285 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -467,7 +467,7 @@ config I2C_PXA_SLAVE config I2C_S3C2410 tristate "S3C2410 I2C Driver" - depends on ARCH_S3C2410 + depends on ARCH_S3C2410 || ARCH_S3C64XX help Say Y here to include support for I2C controller in the Samsung S3C2410 based System-on-Chip devices. diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c index 3fcf78e906d..b5db8b88361 100644 --- a/drivers/i2c/busses/i2c-cpm.c +++ b/drivers/i2c/busses/i2c-cpm.c @@ -531,16 +531,16 @@ static int __devinit cpm_i2c_setup(struct cpm_i2c *cpm) rbdf = cpm->rbase; for (i = 0; i < CPM_MAXBD; i++) { - cpm->rxbuf[i] = dma_alloc_coherent( - NULL, CPM_MAX_READ + 1, &cpm->rxdma[i], GFP_KERNEL); + cpm->rxbuf[i] = dma_alloc_coherent(&cpm->ofdev->dev, + CPM_MAX_READ + 1, + &cpm->rxdma[i], GFP_KERNEL); if (!cpm->rxbuf[i]) { ret = -ENOMEM; goto out_muram; } out_be32(&rbdf[i].cbd_bufaddr, ((cpm->rxdma[i] + 1) & ~1)); - cpm->txbuf[i] = (unsigned char *)dma_alloc_coherent( - NULL, CPM_MAX_READ + 1, &cpm->txdma[i], GFP_KERNEL); + cpm->txbuf[i] = (unsigned char *)dma_alloc_coherent(&cpm->ofdev->dev, CPM_MAX_READ + 1, &cpm->txdma[i], GFP_KERNEL); if (!cpm->txbuf[i]) { ret = -ENOMEM; goto out_muram; @@ -585,10 +585,10 @@ static int __devinit cpm_i2c_setup(struct cpm_i2c *cpm) out_muram: for (i = 0; i < CPM_MAXBD; i++) { if (cpm->rxbuf[i]) - dma_free_coherent(NULL, CPM_MAX_READ + 1, + dma_free_coherent(&cpm->ofdev->dev, CPM_MAX_READ + 1, cpm->rxbuf[i], cpm->rxdma[i]); if (cpm->txbuf[i]) - dma_free_coherent(NULL, CPM_MAX_READ + 1, + dma_free_coherent(&cpm->ofdev->dev, CPM_MAX_READ + 1, cpm->txbuf[i], cpm->txdma[i]); } cpm_muram_free(cpm->dp_addr); @@ -619,9 +619,9 @@ static void cpm_i2c_shutdown(struct cpm_i2c *cpm) /* Free all memory */ for (i = 0; i < CPM_MAXBD; i++) { - dma_free_coherent(NULL, CPM_MAX_READ + 1, + dma_free_coherent(&cpm->ofdev->dev, CPM_MAX_READ + 1, cpm->rxbuf[i], cpm->rxdma[i]); - dma_free_coherent(NULL, CPM_MAX_READ + 1, + dma_free_coherent(&cpm->ofdev->dev, CPM_MAX_READ + 1, cpm->txbuf[i], cpm->txdma[i]); } diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c index 
4af5c09f0e8..dd778d7ae04 100644 --- a/drivers/i2c/busses/i2c-mpc.c +++ b/drivers/i2c/busses/i2c-mpc.c @@ -164,7 +164,7 @@ static int i2c_wait(struct mpc_i2c *i2c, unsigned timeout, int writing) return 0; } -#ifdef CONFIG_PPC_52xx +#ifdef CONFIG_PPC_MPC52xx static const struct mpc_i2c_divider mpc_i2c_dividers_52xx[] = { {20, 0x20}, {22, 0x21}, {24, 0x22}, {26, 0x23}, {28, 0x24}, {30, 0x01}, {32, 0x25}, {34, 0x02}, @@ -188,7 +188,7 @@ static const struct mpc_i2c_divider mpc_i2c_dividers_52xx[] = { int mpc_i2c_get_fdr_52xx(struct device_node *node, u32 clock, int prescaler) { - const struct mpc52xx_i2c_divider *div = NULL; + const struct mpc_i2c_divider *div = NULL; unsigned int pvr = mfspr(SPRN_PVR); u32 divider; int i; @@ -203,7 +203,7 @@ int mpc_i2c_get_fdr_52xx(struct device_node *node, u32 clock, int prescaler) * We want to choose an FDR/DFSR that generates an I2C bus speed that * is equal to or lower than the requested speed. */ - for (i = 0; i < ARRAY_SIZE(mpc52xx_i2c_dividers); i++) { + for (i = 0; i < ARRAY_SIZE(mpc_i2c_dividers_52xx); i++) { div = &mpc_i2c_dividers_52xx[i]; /* Old MPC5200 rev A CPUs do not support the high bits */ if (div->fdr & 0xc0 && pvr == 0x80822011) @@ -219,20 +219,23 @@ static void mpc_i2c_setclock_52xx(struct device_node *node, struct mpc_i2c *i2c, u32 clock, u32 prescaler) { - int fdr = mpc52xx_i2c_get_fdr(node, clock, prescaler); + int ret, fdr; + + ret = mpc_i2c_get_fdr_52xx(node, clock, prescaler); + fdr = (ret >= 0) ? ret : 0x3f; /* backward compatibility */ - if (fdr < 0) - fdr = 0x3f; /* backward compatibility */ writeb(fdr & 0xff, i2c->base + MPC_I2C_FDR); - dev_info(i2c->dev, "clock %d Hz (fdr=%d)\n", clock, fdr); + + if (ret >= 0) + dev_info(i2c->dev, "clock %d Hz (fdr=%d)\n", clock, fdr); } -#else /* !CONFIG_PPC_52xx */ +#else /* !CONFIG_PPC_MPC52xx */ static void mpc_i2c_setclock_52xx(struct device_node *node, struct mpc_i2c *i2c, u32 clock, u32 prescaler) { } -#endif /* CONFIG_PPC_52xx*/ +#endif /* CONFIG_PPC_MPC52xx*/ #ifdef CONFIG_FSL_SOC static const struct mpc_i2c_divider mpc_i2c_dividers_8xxx[] = { @@ -321,14 +324,17 @@ static void mpc_i2c_setclock_8xxx(struct device_node *node, struct mpc_i2c *i2c, u32 clock, u32 prescaler) { - int fdr = mpc_i2c_get_fdr_8xxx(node, clock, prescaler); + int ret, fdr; + + ret = mpc_i2c_get_fdr_8xxx(node, clock, prescaler); + fdr = (ret >= 0) ? ret : 0x1031; /* backward compatibility */ - if (fdr < 0) - fdr = 0x1031; /* backward compatibility */ writeb(fdr & 0xff, i2c->base + MPC_I2C_FDR); writeb((fdr >> 8) & 0xff, i2c->base + MPC_I2C_DFSRR); - dev_info(i2c->dev, "clock %d Hz (dfsrr=%d fdr=%d)\n", - clock, fdr >> 8, fdr & 0xff); + + if (ret >= 0) + dev_info(i2c->dev, "clock %d Hz (dfsrr=%d fdr=%d)\n", + clock, fdr >> 8, fdr & 0xff); } #else /* !CONFIG_FSL_SOC */ diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c index c1405c8f6ba..acc7143d965 100644 --- a/drivers/i2c/busses/i2c-pxa.c +++ b/drivers/i2c/busses/i2c-pxa.c @@ -265,10 +265,10 @@ static int i2c_pxa_wait_bus_not_busy(struct pxa_i2c *i2c) show_state(i2c); } - if (timeout <= 0) + if (timeout < 0) show_state(i2c); - return timeout <= 0 ? I2C_RETRY : 0; + return timeout < 0 ? 
I2C_RETRY : 0; } static int i2c_pxa_wait_master(struct pxa_i2c *i2c) @@ -612,7 +612,7 @@ static int i2c_pxa_pio_set_master(struct pxa_i2c *i2c) show_state(i2c); } - if (timeout <= 0) { + if (timeout < 0) { show_state(i2c); dev_err(&i2c->adap.dev, "i2c_pxa: timeout waiting for bus free\n"); diff --git a/drivers/ide/icside.c b/drivers/ide/icside.c index 4e16ce68b06..36da913cc55 100644 --- a/drivers/ide/icside.c +++ b/drivers/ide/icside.c @@ -466,7 +466,7 @@ icside_register_v6(struct icside_state *state, struct expansion_card *ec) struct ide_host *host; unsigned int sel = 0; int ret; - hw_regs_t hw[2], *hws[] = { &hw[0], NULL, NULL, NULL }; + hw_regs_t hw[2], *hws[] = { &hw[0], &hw[1], NULL, NULL }; struct ide_port_info d = icside_v6_port_info; ioc_base = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0); diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c index 35dc38d3b2c..6415a2e2ba8 100644 --- a/drivers/ide/ide-io.c +++ b/drivers/ide/ide-io.c @@ -696,7 +696,7 @@ void ide_timer_expiry (unsigned long data) } spin_lock_irq(&hwif->lock); enable_irq(hwif->irq); - if (startstop == ide_stopped) { + if (startstop == ide_stopped && hwif->polling == 0) { ide_unlock_port(hwif); plug_device = 1; } @@ -868,7 +868,7 @@ irqreturn_t ide_intr (int irq, void *dev_id) * same irq as is currently being serviced here, and Linux * won't allow another of the same (on any CPU) until we return. */ - if (startstop == ide_stopped) { + if (startstop == ide_stopped && hwif->polling == 0) { BUG_ON(hwif->handler); ide_unlock_port(hwif); plug_device = 1; diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c index c19a221b1e1..06fe002116e 100644 --- a/drivers/ide/ide-iops.c +++ b/drivers/ide/ide-iops.c @@ -206,8 +206,6 @@ EXPORT_SYMBOL_GPL(ide_in_drive_list); /* * Early UDMA66 devices don't set bit14 to 1, only bit13 is valid. - * We list them here and depend on the device side cable detection for them. - * * Some optical devices with the buggy firmwares have the same problem. */ static const struct drive_list_entry ivb_list[] = { @@ -251,10 +249,25 @@ u8 eighty_ninty_three(ide_drive_t *drive) * - force bit13 (80c cable present) check also for !ivb devices * (unless the slave device is pre-ATA3) */ - if ((id[ATA_ID_HW_CONFIG] & 0x4000) || - (ivb && (id[ATA_ID_HW_CONFIG] & 0x2000))) + if (id[ATA_ID_HW_CONFIG] & 0x4000) return 1; + if (ivb) { + const char *model = (char *)&id[ATA_ID_PROD]; + + if (strstr(model, "TSSTcorp CDDVDW SH-S202")) { + /* + * These ATAPI devices always report 80c cable + * so we have to depend on the host in this case. + */ + if (hwif->cbl == ATA_CBL_PATA80) + return 1; + } else { + /* Depend on the device side cable detection. 
*/ + if (id[ATA_ID_HW_CONFIG] & 0x2000) + return 1; + } + } no_80w: if (drive->dev_flags & IDE_DFLAG_UDMA33_WARNED) return 0; diff --git a/drivers/ide/ide-lib.c b/drivers/ide/ide-lib.c index 56ff8c46c7d..2148df836ce 100644 --- a/drivers/ide/ide-lib.c +++ b/drivers/ide/ide-lib.c @@ -31,24 +31,6 @@ void ide_toggle_bounce(ide_drive_t *drive, int on) blk_queue_bounce_limit(drive->queue, addr); } -static void ide_dump_opcode(ide_drive_t *drive) -{ - struct request *rq = drive->hwif->rq; - struct ide_cmd *cmd = NULL; - - if (!rq) - return; - - if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) - cmd = rq->special; - - printk(KERN_ERR "ide: failed opcode was: "); - if (cmd == NULL) - printk(KERN_CONT "unknown\n"); - else - printk(KERN_CONT "0x%02x\n", cmd->tf.command); -} - u64 ide_get_lba_addr(struct ide_cmd *cmd, int lba48) { struct ide_taskfile *tf = &cmd->tf; @@ -91,7 +73,7 @@ static void ide_dump_sector(ide_drive_t *drive) static void ide_dump_ata_error(ide_drive_t *drive, u8 err) { - printk(KERN_ERR "{ "); + printk(KERN_CONT "{ "); if (err & ATA_ABORTED) printk(KERN_CONT "DriveStatusError "); if (err & ATA_ICRC) @@ -121,7 +103,7 @@ static void ide_dump_ata_error(ide_drive_t *drive, u8 err) static void ide_dump_atapi_error(ide_drive_t *drive, u8 err) { - printk(KERN_ERR "{ "); + printk(KERN_CONT "{ "); if (err & ATAPI_ILI) printk(KERN_CONT "IllegalLengthIndication "); if (err & ATAPI_EOM) @@ -179,7 +161,10 @@ u8 ide_dump_status(ide_drive_t *drive, const char *msg, u8 stat) else ide_dump_atapi_error(drive, err); } - ide_dump_opcode(drive); + + printk(KERN_ERR "%s: possibly failed opcode: 0x%02x\n", + drive->name, drive->hwif->cmd.tf.command); + return err; } EXPORT_SYMBOL(ide_dump_status); diff --git a/drivers/ide/ide-pci-generic.c b/drivers/ide/ide-pci-generic.c index 61111fd2713..39d4e01f5c9 100644 --- a/drivers/ide/ide-pci-generic.c +++ b/drivers/ide/ide-pci-generic.c @@ -33,6 +33,16 @@ static int ide_generic_all; /* Set to claim all devices */ module_param_named(all_generic_ide, ide_generic_all, bool, 0444); MODULE_PARM_DESC(all_generic_ide, "IDE generic will claim all unknown PCI IDE storage controllers."); +static void netcell_quirkproc(ide_drive_t *drive) +{ + /* mark words 85-87 as valid */ + drive->id[ATA_ID_CSF_DEFAULT] |= 0x4000; +} + +static const struct ide_port_ops netcell_port_ops = { + .quirkproc = netcell_quirkproc, +}; + #define DECLARE_GENERIC_PCI_DEV(extra_flags) \ { \ .name = DRV_NAME, \ @@ -74,6 +84,7 @@ static const struct ide_port_info generic_chipsets[] __devinitdata = { { /* 6: Revolution */ .name = DRV_NAME, + .port_ops = &netcell_port_ops, .host_flags = IDE_HFLAG_CLEAR_SIMPLEX | IDE_HFLAG_TRUST_BIOS_FOR_DMA | IDE_HFLAG_OFF_BOARD, diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c index 7f264ed1141..c895ed52b2e 100644 --- a/drivers/ide/ide-probe.c +++ b/drivers/ide/ide-probe.c @@ -295,7 +295,7 @@ int ide_dev_read_id(ide_drive_t *drive, u8 cmd, u16 *id) timeout = ((cmd == ATA_CMD_ID_ATA) ? 
WAIT_WORSTCASE : WAIT_PIDENTIFY) / 2; - if (ide_busy_sleep(hwif, timeout, use_altstatus)) + if (ide_busy_sleep(drive, timeout, use_altstatus)) return 1; /* wait for IRQ and ATA_DRQ */ @@ -316,8 +316,9 @@ int ide_dev_read_id(ide_drive_t *drive, u8 cmd, u16 *id) return rc; } -int ide_busy_sleep(ide_hwif_t *hwif, unsigned long timeout, int altstatus) +int ide_busy_sleep(ide_drive_t *drive, unsigned long timeout, int altstatus) { + ide_hwif_t *hwif = drive->hwif; u8 stat; timeout += jiffies; @@ -330,6 +331,8 @@ int ide_busy_sleep(ide_hwif_t *hwif, unsigned long timeout, int altstatus) return 0; } while (time_before(jiffies, timeout)); + printk(KERN_ERR "%s: timeout in %s\n", drive->name, __func__); + return 1; /* drive timed-out */ } @@ -420,7 +423,7 @@ static int do_probe (ide_drive_t *drive, u8 cmd) tp_ops->dev_select(drive); msleep(50); tp_ops->exec_command(hwif, ATA_CMD_DEV_RESET); - (void)ide_busy_sleep(hwif, WAIT_WORSTCASE, 0); + (void)ide_busy_sleep(drive, WAIT_WORSTCASE, 0); rc = ide_dev_read_id(drive, cmd, id); } diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c index cb942a9b580..3a53e0834cf 100644 --- a/drivers/ide/ide-tape.c +++ b/drivers/ide/ide-tape.c @@ -614,12 +614,6 @@ static ide_startstop_t ide_tape_issue_pc(ide_drive_t *drive, { idetape_tape_t *tape = drive->driver_data; - if (drive->pc->c[0] == REQUEST_SENSE && - pc->c[0] == REQUEST_SENSE) { - printk(KERN_ERR "ide-tape: possible ide-tape.c bug - " - "Two request sense in serial were issued\n"); - } - if (drive->failed_pc == NULL && pc->c[0] != REQUEST_SENSE) drive->failed_pc = pc; diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c index 248a54bd238..b3bc96f930a 100644 --- a/drivers/ide/pdc202xx_old.c +++ b/drivers/ide/pdc202xx_old.c @@ -1,6 +1,6 @@ /* * Copyright (C) 1998-2002 Andre Hedrick - * Copyright (C) 2006-2007 MontaVista Software, Inc. + * Copyright (C) 2006-2007, 2009 MontaVista Software, Inc. * Copyright (C) 2007 Bartlomiej Zolnierkiewicz * * Portions Copyright (C) 1999 Promise Technology, Inc. @@ -227,28 +227,19 @@ somebody_else: return (dma_stat & 4) == 4; /* return 1 if INTR asserted */ } -static void pdc202xx_reset_host (ide_hwif_t *hwif) +static void pdc202xx_reset(ide_drive_t *drive) { + ide_hwif_t *hwif = drive->hwif; unsigned long high_16 = hwif->extra_base - 16; u8 udma_speed_flag = inb(high_16 | 0x001f); + printk(KERN_WARNING "PDC202xx: software reset...\n"); + outb(udma_speed_flag | 0x10, high_16 | 0x001f); mdelay(100); outb(udma_speed_flag & ~0x10, high_16 | 0x001f); mdelay(2000); /* 2 seconds ?! */ - printk(KERN_WARNING "PDC202XX: %s channel reset.\n", - hwif->channel ? 
"Secondary" : "Primary"); -} - -static void pdc202xx_reset (ide_drive_t *drive) -{ - ide_hwif_t *hwif = drive->hwif; - ide_hwif_t *mate = hwif->mate; - - pdc202xx_reset_host(hwif); - pdc202xx_reset_host(mate); - ide_set_max_pio(drive); } @@ -328,9 +319,8 @@ static const struct ide_dma_ops pdc20246_dma_ops = { .dma_start = ide_dma_start, .dma_end = ide_dma_end, .dma_test_irq = pdc202xx_dma_test_irq, - .dma_lost_irq = pdc202xx_dma_lost_irq, + .dma_lost_irq = ide_dma_lost_irq, .dma_timer_expiry = ide_dma_sff_timer_expiry, - .dma_clear = pdc202xx_reset, .dma_sff_read_status = ide_dma_sff_read_status, }; diff --git a/drivers/ide/piix.c b/drivers/ide/piix.c index 2aa69993306..69860dea382 100644 --- a/drivers/ide/piix.c +++ b/drivers/ide/piix.c @@ -263,6 +263,7 @@ static const struct ich_laptop ich_laptop[] = { { 0x24CA, 0x1025, 0x003d }, /* ICH4 on ACER TM290 */ { 0x266F, 0x1025, 0x0066 }, /* ICH6 on ACER Aspire 1694WLMi */ { 0x2653, 0x1043, 0x82D8 }, /* ICH6M on Asus Eee 701 */ + { 0x27df, 0x104d, 0x900e }, /* ICH7 on Sony TZ-90 */ /* end marker */ { 0, } }; diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c index 3ff7231e485..028de26a25f 100644 --- a/drivers/ide/via82cxxx.c +++ b/drivers/ide/via82cxxx.c @@ -67,6 +67,7 @@ static struct via_isa_bridge { u8 udma_mask; u8 flags; } via_isa_bridges[] = { + { "vx855", PCI_DEVICE_ID_VIA_VX855, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST }, { "vx800", PCI_DEVICE_ID_VIA_VX800, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST }, { "cx700", PCI_DEVICE_ID_VIA_CX700, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST }, { "vt8237s", PCI_DEVICE_ID_VIA_8237S, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST }, @@ -474,6 +475,7 @@ static const struct pci_device_id via_pci_tbl[] = { { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_82C576_1), 0 }, { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_82C586_1), 0 }, { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_CX700_IDE), 0 }, + { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_VX855_IDE), 0 }, { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_6410), 1 }, { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_SATA_EIDE), 1 }, { 0, }, diff --git a/drivers/idle/i7300_idle.c b/drivers/idle/i7300_idle.c index bf740394d70..949c97ff57e 100644 --- a/drivers/idle/i7300_idle.c +++ b/drivers/idle/i7300_idle.c @@ -41,6 +41,10 @@ static int debug; module_param_named(debug, debug, uint, 0644); MODULE_PARM_DESC(debug, "Enable debug printks in this driver"); +static int forceload; +module_param_named(forceload, forceload, uint, 0644); +MODULE_PARM_DESC(debug, "Enable driver testing on unvalidated i5000"); + #define dprintk(fmt, arg...) 
\ do { if (debug) printk(KERN_INFO I7300_PRINT fmt, ##arg); } while (0) @@ -552,7 +556,7 @@ static int __init i7300_idle_init(void) cpus_clear(idle_cpumask); total_us = 0; - if (i7300_idle_platform_probe(&fbd_dev, &ioat_dev)) + if (i7300_idle_platform_probe(&fbd_dev, &ioat_dev, forceload)) return -ENODEV; if (i7300_idle_thrt_save()) diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c index 8d71086f5a1..62f9cf2f94e 100644 --- a/drivers/infiniband/hw/cxgb3/cxio_hal.c +++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c @@ -410,6 +410,7 @@ int cxio_flush_sq(struct t3_wq *wq, struct t3_cq *cq, int count) ptr = wq->sq_rptr + count; sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2); while (ptr != wq->sq_wptr) { + sqp->signaled = 0; insert_sq_cqe(wq, cq, sqp); ptr++; sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2); diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c index 8dc2bb78160..b3684060465 100644 --- a/drivers/infiniband/hw/ipath/ipath_fs.c +++ b/drivers/infiniband/hw/ipath/ipath_fs.c @@ -347,7 +347,7 @@ static int ipathfs_fill_super(struct super_block *sb, void *data, spin_unlock_irqrestore(&ipath_devs_lock, flags); ret = create_device_files(sb, dd); if (ret) { - deactivate_super(sb); + deactivate_locked_super(sb); goto bail; } spin_lock_irqsave(&ipath_devs_lock, flags); diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h index 9974e886b8d..8a7dd6795fa 100644 --- a/drivers/infiniband/hw/mlx4/mlx4_ib.h +++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h @@ -86,6 +86,7 @@ struct mlx4_ib_mr { struct mlx4_ib_fast_reg_page_list { struct ib_fast_reg_page_list ibfrpl; + __be64 *mapped_page_list; dma_addr_t map; }; diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c index 8e4d26d56a9..8f3666b20ea 100644 --- a/drivers/infiniband/hw/mlx4/mr.c +++ b/drivers/infiniband/hw/mlx4/mr.c @@ -231,7 +231,11 @@ struct ib_fast_reg_page_list *mlx4_ib_alloc_fast_reg_page_list(struct ib_device if (!mfrpl) return ERR_PTR(-ENOMEM); - mfrpl->ibfrpl.page_list = dma_alloc_coherent(&dev->dev->pdev->dev, + mfrpl->ibfrpl.page_list = kmalloc(size, GFP_KERNEL); + if (!mfrpl->ibfrpl.page_list) + goto err_free; + + mfrpl->mapped_page_list = dma_alloc_coherent(&dev->dev->pdev->dev, size, &mfrpl->map, GFP_KERNEL); if (!mfrpl->ibfrpl.page_list) @@ -242,6 +246,7 @@ struct ib_fast_reg_page_list *mlx4_ib_alloc_fast_reg_page_list(struct ib_device return &mfrpl->ibfrpl; err_free: + kfree(mfrpl->ibfrpl.page_list); kfree(mfrpl); return ERR_PTR(-ENOMEM); } @@ -252,8 +257,9 @@ void mlx4_ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list) struct mlx4_ib_fast_reg_page_list *mfrpl = to_mfrpl(page_list); int size = page_list->max_page_list_len * sizeof (u64); - dma_free_coherent(&dev->dev->pdev->dev, size, page_list->page_list, + dma_free_coherent(&dev->dev->pdev->dev, size, mfrpl->mapped_page_list, mfrpl->map); + kfree(mfrpl->ibfrpl.page_list); kfree(mfrpl); } diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index f385a24d31d..20724aee76f 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c @@ -1365,7 +1365,7 @@ static void set_fmr_seg(struct mlx4_wqe_fmr_seg *fseg, struct ib_send_wr *wr) int i; for (i = 0; i < wr->wr.fast_reg.page_list_len; ++i) - wr->wr.fast_reg.page_list->page_list[i] = + mfrpl->mapped_page_list[i] = cpu_to_be64(wr->wr.fast_reg.page_list->page_list[i] | MLX4_MTT_FLAG_PRESENT); diff --git a/drivers/input/ff-memless.c 
b/drivers/input/ff-memless.c index bc4e40f3ede..2d1415e1683 100644 --- a/drivers/input/ff-memless.c +++ b/drivers/input/ff-memless.c @@ -226,7 +226,7 @@ static int get_compatible_type(struct ff_device *ff, int effect_type) */ static void ml_combine_effects(struct ff_effect *effect, struct ml_effect_state *state, - int gain) + unsigned int gain) { struct ff_effect *new = state->effect; unsigned int strong, weak, i; diff --git a/drivers/input/input.c b/drivers/input/input.c index e54e002665b..5d445f48789 100644 --- a/drivers/input/input.c +++ b/drivers/input/input.c @@ -42,6 +42,7 @@ static unsigned int input_abs_bypass_init_data[] __initdata = { ABS_MT_POSITION_Y, ABS_MT_TOOL_TYPE, ABS_MT_BLOB_ID, + ABS_MT_TRACKING_ID, 0 }; static unsigned long input_abs_bypass[BITS_TO_LONGS(ABS_CNT)]; diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c index 15bb41195be..0e12f89276a 100644 --- a/drivers/input/joydev.c +++ b/drivers/input/joydev.c @@ -843,7 +843,13 @@ static const struct input_device_id joydev_blacklist[] = { INPUT_DEVICE_ID_MATCH_KEYBIT, .evbit = { BIT_MASK(EV_KEY) }, .keybit = { [BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH) }, - }, /* Avoid itouchpads, touchscreens and tablets */ + }, /* Avoid itouchpads and touchscreens */ + { + .flags = INPUT_DEVICE_ID_MATCH_EVBIT | + INPUT_DEVICE_ID_MATCH_KEYBIT, + .evbit = { BIT_MASK(EV_KEY) }, + .keybit = { [BIT_WORD(BTN_DIGI)] = BIT_MASK(BTN_DIGI) }, + }, /* Avoid tablets, digitisers and similar devices */ { } /* Terminating entry */ }; diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c index 444dec07e5d..df3f8aa6811 100644 --- a/drivers/input/keyboard/atkbd.c +++ b/drivers/input/keyboard/atkbd.c @@ -894,6 +894,13 @@ static unsigned int atkbd_amilo_pa1510_forced_release_keys[] = { 0xb0, 0xae, -1U }; +/* + * Amilo Xi 3650 key release for light touch bar not working + */ +static unsigned int atkbd_amilo_xi3650_forced_release_keys[] = { + 0x67, 0xed, 0x90, 0xa2, 0x99, 0xa4, 0xae, 0xb0, -1U +}; + /* * atkbd_set_keycode_table() initializes keyboard's keycode table * according to the selected scancode set @@ -1560,6 +1567,15 @@ static struct dmi_system_id atkbd_dmi_quirk_table[] __initdata = { .callback = atkbd_setup_forced_release, .driver_data = atkbd_amilo_pa1510_forced_release_keys, }, + { + .ident = "Fujitsu Amilo Xi 3650", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"), + DMI_MATCH(DMI_PRODUCT_NAME, "AMILO Xi 3650"), + }, + .callback = atkbd_setup_forced_release, + .driver_data = atkbd_amilo_xi3650_forced_release_keys, + }, { } }; diff --git a/drivers/input/serio/ambakmi.c b/drivers/input/serio/ambakmi.c index e29cdc13a19..a28c06d686e 100644 --- a/drivers/input/serio/ambakmi.c +++ b/drivers/input/serio/ambakmi.c @@ -107,7 +107,7 @@ static void amba_kmi_close(struct serio *io) clk_disable(kmi->clk); } -static int amba_kmi_probe(struct amba_device *dev, void *id) +static int amba_kmi_probe(struct amba_device *dev, struct amba_id *id) { struct amba_kmi_port *kmi; struct serio *io; diff --git a/drivers/input/serio/libps2.c b/drivers/input/serio/libps2.c index 67248c31e19..be5bbbb8ae4 100644 --- a/drivers/input/serio/libps2.c +++ b/drivers/input/serio/libps2.c @@ -210,7 +210,7 @@ int ps2_command(struct ps2dev *ps2dev, unsigned char *param, int command) timeout = wait_event_timeout(ps2dev->wait, !(ps2dev->flags & PS2_FLAG_CMD1), timeout); - if (ps2dev->cmdcnt && timeout > 0) { + if (ps2dev->cmdcnt && !(ps2dev->flags & PS2_FLAG_CMD1)) { timeout = ps2_adjust_timeout(ps2dev, command, timeout); 
wait_event_timeout(ps2dev->wait, diff --git a/drivers/input/touchscreen/tsc2007.c b/drivers/input/touchscreen/tsc2007.c index edd4f64d1f4..880f58c6a7c 100644 --- a/drivers/input/touchscreen/tsc2007.c +++ b/drivers/input/touchscreen/tsc2007.c @@ -200,8 +200,9 @@ static int tsc2007_read_values(struct tsc2007 *tsc) static enum hrtimer_restart tsc2007_timer(struct hrtimer *handle) { struct tsc2007 *ts = container_of(handle, struct tsc2007, timer); + unsigned long flags; - spin_lock_irq(&ts->lock); + spin_lock_irqsave(&ts->lock, flags); if (unlikely(!ts->get_pendown_state() && ts->pendown)) { struct input_dev *input = ts->input; @@ -222,7 +223,7 @@ static enum hrtimer_restart tsc2007_timer(struct hrtimer *handle) tsc2007_send_event(ts); } - spin_unlock_irq(&ts->lock); + spin_unlock_irqrestore(&ts->lock, flags); return HRTIMER_NORESTART; } diff --git a/drivers/input/touchscreen/ucb1400_ts.c b/drivers/input/touchscreen/ucb1400_ts.c index f100c7f4c1d..6954f550010 100644 --- a/drivers/input/touchscreen/ucb1400_ts.c +++ b/drivers/input/touchscreen/ucb1400_ts.c @@ -419,7 +419,7 @@ static int ucb1400_ts_remove(struct platform_device *dev) #ifdef CONFIG_PM static int ucb1400_ts_resume(struct platform_device *dev) { - struct ucb1400_ts *ucb = platform_get_drvdata(dev); + struct ucb1400_ts *ucb = dev->dev.platform_data; if (ucb->ts_task) { /* diff --git a/drivers/isdn/capi/capifs.c b/drivers/isdn/capi/capifs.c index b129409925a..bff72d81f26 100644 --- a/drivers/isdn/capi/capifs.c +++ b/drivers/isdn/capi/capifs.c @@ -75,15 +75,17 @@ static int capifs_remount(struct super_block *s, int *flags, char *data) } } - kfree(s->s_options); - s->s_options = new_opt; + mutex_lock(&s->s_root->d_inode->i_mutex); + replace_mount_options(s, new_opt); config.setuid = setuid; config.setgid = setgid; config.uid = uid; config.gid = gid; config.mode = mode; + mutex_unlock(&s->s_root->d_inode->i_mutex); + return 0; } @@ -154,13 +156,16 @@ void capifs_new_ncci(unsigned int number, dev_t device) if (!inode) return; inode->i_ino = number+2; + + dentry = get_node(number); + + /* config contents is protected by root's i_mutex */ inode->i_uid = config.setuid ? config.uid : current_fsuid(); inode->i_gid = config.setgid ? config.gid : current_fsgid(); inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; init_special_inode(inode, S_IFCHR|config.mode, device); //inode->i_op = &capifs_file_inode_operations; - dentry = get_node(number); if (!IS_ERR(dentry) && !dentry->d_inode) d_instantiate(dentry, inode); mutex_unlock(&capifs_root->d_inode->i_mutex); diff --git a/drivers/isdn/gigaset/isocdata.c b/drivers/isdn/gigaset/isocdata.c index b171e75cb52..29808c4fb1c 100644 --- a/drivers/isdn/gigaset/isocdata.c +++ b/drivers/isdn/gigaset/isocdata.c @@ -175,7 +175,7 @@ int gigaset_isowbuf_getbytes(struct isowbuf_t *iwb, int size) return -EINVAL; } src = iwb->read; - if (unlikely(limit > BAS_OUTBUFSIZE + BAS_OUTBUFPAD || + if (unlikely(limit >= BAS_OUTBUFSIZE + BAS_OUTBUFPAD || (read < src && limit >= src))) { pr_err("isoc write buffer frame reservation violated\n"); return -EFAULT; diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c index 1a83910f674..eaf722fe309 100644 --- a/drivers/lguest/x86/core.c +++ b/drivers/lguest/x86/core.c @@ -358,6 +358,16 @@ void lguest_arch_handle_trap(struct lg_cpu *cpu) if (emulate_insn(cpu)) return; } + /* If KVM is active, the vmcall instruction triggers a + * General Protection Fault. 
Normally it triggers an + * invalid opcode fault (6): */ + case 6: + /* We need to check if ring == GUEST_PL and + * faulting instruction == vmcall. */ + if (is_hypercall(cpu)) { + rewrite_hypercall(cpu); + return; + } break; case 14: /* We've intercepted a Page Fault. */ /* The Guest accessed a virtual address that wasn't mapped. @@ -403,15 +413,6 @@ void lguest_arch_handle_trap(struct lg_cpu *cpu) * up the pointer now to indicate a hypercall is pending. */ cpu->hcall = (struct hcall_args *)cpu->regs; return; - case 6: - /* kvm hypercalls trigger an invalid opcode fault (6). - * We need to check if ring == GUEST_PL and - * faulting instruction == vmcall. */ - if (is_hypercall(cpu)) { - rewrite_hypercall(cpu); - return; - } - break; } /* We didn't handle the trap, so it needs to go to the Guest. */ diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index 47c68bc75a1..56df1cee8fb 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c @@ -1097,14 +1097,12 @@ void bitmap_daemon_work(struct bitmap *bitmap) } bitmap->allclean = 1; + spin_lock_irqsave(&bitmap->lock, flags); for (j = 0; j < bitmap->chunks; j++) { bitmap_counter_t *bmc; - spin_lock_irqsave(&bitmap->lock, flags); - if (!bitmap->filemap) { + if (!bitmap->filemap) /* error or shutdown */ - spin_unlock_irqrestore(&bitmap->lock, flags); break; - } page = filemap_get_page(bitmap, j); @@ -1121,6 +1119,8 @@ void bitmap_daemon_work(struct bitmap *bitmap) write_page(bitmap, page, 0); bitmap->allclean = 0; } + spin_lock_irqsave(&bitmap->lock, flags); + j |= (PAGE_BITS - 1); continue; } @@ -1181,9 +1181,10 @@ void bitmap_daemon_work(struct bitmap *bitmap) ext2_clear_bit(file_page_offset(j), paddr); kunmap_atomic(paddr, KM_USER0); } - } - spin_unlock_irqrestore(&bitmap->lock, flags); + } else + j |= PAGE_COUNTER_MASK; } + spin_unlock_irqrestore(&bitmap->lock, flags); /* now sync the final page */ if (lastpage != NULL) { diff --git a/drivers/md/md.c b/drivers/md/md.c index fccc8343a25..641b211fe3f 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -1375,6 +1375,9 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev) sb->raid_disks = cpu_to_le32(mddev->raid_disks); sb->size = cpu_to_le64(mddev->dev_sectors); + sb->chunksize = cpu_to_le32(mddev->chunk_size >> 9); + sb->level = cpu_to_le32(mddev->level); + sb->layout = cpu_to_le32(mddev->layout); if (mddev->bitmap && mddev->bitmap_file == NULL) { sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_offset); @@ -3303,7 +3306,9 @@ static ssize_t action_show(mddev_t *mddev, char *page) { char *type = "idle"; - if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || + if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) + type = "frozen"; + else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))) { if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) type = "reshape"; @@ -3326,7 +3331,12 @@ action_store(mddev_t *mddev, const char *page, size_t len) if (!mddev->pers || !mddev->pers->sync_request) return -EINVAL; - if (cmd_match(page, "idle")) { + if (cmd_match(page, "frozen")) + set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); + else + clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); + + if (cmd_match(page, "idle") || cmd_match(page, "frozen")) { if (mddev->sync_thread) { set_bit(MD_RECOVERY_INTR, &mddev->recovery); md_unregister_thread(mddev->sync_thread); @@ -3680,7 +3690,7 @@ array_size_store(mddev_t *mddev, const char *buf, size_t len) if (strict_blocks_to_sectors(buf, &sectors) < 0) return -EINVAL; if (mddev->pers &&
mddev->pers->size(mddev, 0, 0) < sectors) - return -EINVAL; + return -E2BIG; mddev->external_size = 1; } @@ -5557,7 +5567,7 @@ static struct block_device_operations md_fops = .owner = THIS_MODULE, .open = md_open, .release = md_release, - .locked_ioctl = md_ioctl, + .ioctl = md_ioctl, .getgeo = md_getgeo, .media_changed = md_media_changed, .revalidate_disk= md_revalidate, @@ -6352,12 +6362,13 @@ void md_do_sync(mddev_t *mddev) skipped = 0; - if ((mddev->curr_resync > mddev->curr_resync_completed && - (mddev->curr_resync - mddev->curr_resync_completed) - > (max_sectors >> 4)) || - (j - mddev->curr_resync_completed)*2 - >= mddev->resync_max - mddev->curr_resync_completed - ) { + if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && + ((mddev->curr_resync > mddev->curr_resync_completed && + (mddev->curr_resync - mddev->curr_resync_completed) + > (max_sectors >> 4)) || + (j - mddev->curr_resync_completed)*2 + >= mddev->resync_max - mddev->curr_resync_completed + )) { /* time to update curr_resync_completed */ blk_unplug(mddev->queue); wait_event(mddev->recovery_wait, diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 4616bc3a6e7..bb37fb1b2d8 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -362,7 +362,7 @@ static void raid5_unplug_device(struct request_queue *q); static struct stripe_head * get_active_stripe(raid5_conf_t *conf, sector_t sector, - int previous, int noblock) + int previous, int noblock, int noquiesce) { struct stripe_head *sh; @@ -372,7 +372,7 @@ get_active_stripe(raid5_conf_t *conf, sector_t sector, do { wait_event_lock_irq(conf->wait_for_stripe, - conf->quiesce == 0, + conf->quiesce == 0 || noquiesce, conf->device_lock, /* nothing */); sh = __find_stripe(conf, sector, conf->generation - previous); if (!sh) { @@ -2671,7 +2671,7 @@ static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh, sector_t bn = compute_blocknr(sh, i, 1); sector_t s = raid5_compute_sector(conf, bn, 0, &dd_idx, NULL); - sh2 = get_active_stripe(conf, s, 0, 1); + sh2 = get_active_stripe(conf, s, 0, 1, 1); if (sh2 == NULL) /* so far only the early blocks of this stripe * have been requested. When later blocks @@ -2944,7 +2944,7 @@ static bool handle_stripe5(struct stripe_head *sh) /* Finish reconstruct operations initiated by the expansion process */ if (sh->reconstruct_state == reconstruct_state_result) { struct stripe_head *sh2 - = get_active_stripe(conf, sh->sector, 1, 1); + = get_active_stripe(conf, sh->sector, 1, 1, 1); if (sh2 && test_bit(STRIPE_EXPAND_SOURCE, &sh2->state)) { /* sh cannot be written until sh2 has been read. * so arrange for sh to be delayed a little @@ -3189,7 +3189,7 @@ static bool handle_stripe6(struct stripe_head *sh, struct page *tmp_page) if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state)) { struct stripe_head *sh2 - = get_active_stripe(conf, sh->sector, 1, 1); + = get_active_stripe(conf, sh->sector, 1, 1, 1); if (sh2 && test_bit(STRIPE_EXPAND_SOURCE, &sh2->state)) { /* sh cannot be written until sh2 has been read. 
* so arrange for sh to be delayed a little @@ -3288,7 +3288,7 @@ static void unplug_slaves(mddev_t *mddev) int i; rcu_read_lock(); - for (i=0; i<conf->raid_disks; i++) { + for (i = 0; i < conf->raid_disks; i++) { mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev); if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) { struct request_queue *r_queue = bdev_get_queue(rdev->bdev); @@ -3675,7 +3675,7 @@ static int make_request(struct request_queue *q, struct bio * bi) (unsigned long long)logical_sector); sh = get_active_stripe(conf, new_sector, previous, - (bi->bi_rw&RWA_MASK)); + (bi->bi_rw&RWA_MASK), 0); if (sh) { if (unlikely(previous)) { /* expansion might have moved on while waiting for a @@ -3811,13 +3811,13 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped safepos = conf->reshape_safe; sector_div(safepos, data_disks); if (mddev->delta_disks < 0) { - writepos -= reshape_sectors; + writepos -= min_t(sector_t, reshape_sectors, writepos); readpos += reshape_sectors; safepos += reshape_sectors; } else { writepos += reshape_sectors; - readpos -= reshape_sectors; - safepos -= reshape_sectors; + readpos -= min_t(sector_t, reshape_sectors, readpos); + safepos -= min_t(sector_t, reshape_sectors, safepos); } /* 'writepos' is the most advanced device address we might write. @@ -3873,7 +3873,7 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped for (i = 0; i < reshape_sectors; i += STRIPE_SECTORS) { int j; int skipped = 0; - sh = get_active_stripe(conf, stripe_addr+i, 0, 0); + sh = get_active_stripe(conf, stripe_addr+i, 0, 0, 1); set_bit(STRIPE_EXPANDING, &sh->state); atomic_inc(&conf->reshape_stripes); /* If any of this stripe is beyond the end of the old @@ -3916,13 +3916,13 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped raid5_compute_sector(conf, stripe_addr*(new_data_disks), 1, &dd_idx, NULL); last_sector = - raid5_compute_sector(conf, ((stripe_addr+conf->chunk_size/512) + raid5_compute_sector(conf, ((stripe_addr+reshape_sectors) *(new_data_disks) - 1), 1, &dd_idx, NULL); if (last_sector >= mddev->dev_sectors) last_sector = mddev->dev_sectors - 1; while (first_sector <= last_sector) { - sh = get_active_stripe(conf, first_sector, 1, 0); + sh = get_active_stripe(conf, first_sector, 1, 0, 1); set_bit(STRIPE_EXPAND_SOURCE, &sh->state); set_bit(STRIPE_HANDLE, &sh->state); release_stripe(sh); @@ -4022,9 +4022,9 @@ static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *ski bitmap_cond_end_sync(mddev->bitmap, sector_nr); - sh = get_active_stripe(conf, sector_nr, 0, 1); + sh = get_active_stripe(conf, sector_nr, 0, 1, 0); if (sh == NULL) { - sh = get_active_stripe(conf, sector_nr, 0, 0); + sh = get_active_stripe(conf, sector_nr, 0, 0, 0); /* make sure we don't swamp the stripe cache if someone else * is trying to get access */ @@ -4034,7 +4034,7 @@ static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *ski * We don't need to check the 'failed' flag as when that gets set, * recovery aborts.
*/ - for (i=0; i<conf->raid_disks; i++) + for (i = 0; i < conf->raid_disks; i++) if (conf->disks[i].rdev == NULL) still_degraded = 1; @@ -4086,7 +4086,7 @@ static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio) /* already done this stripe */ continue; - sh = get_active_stripe(conf, sector, 0, 1); + sh = get_active_stripe(conf, sector, 0, 1, 0); if (!sh) { /* failed to get a stripe - must wait */ diff --git a/drivers/media/radio/radio-sf16fmi.c b/drivers/media/radio/radio-sf16fmi.c index 1dba8f0832a..5cf6c45b91f 100644 --- a/drivers/media/radio/radio-sf16fmi.c +++ b/drivers/media/radio/radio-sf16fmi.c @@ -153,7 +153,7 @@ static int vidioc_g_tuner(struct file *file, void *priv, mult = (fmi->flags & V4L2_TUNER_CAP_LOW) ? 1 : 1000; v->rangelow = RSF16_MINFREQ / mult; v->rangehigh = RSF16_MAXFREQ / mult; - v->rxsubchans = V4L2_TUNER_SUB_MONO | V4L2_TUNER_MODE_STEREO; + v->rxsubchans = V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_STEREO; v->capability = fmi->flags & V4L2_TUNER_CAP_LOW; v->audmode = V4L2_TUNER_MODE_STEREO; v->signal = fmi_getsigstr(fmi); diff --git a/drivers/media/radio/radio-sf16fmr2.c b/drivers/media/radio/radio-sf16fmr2.c index c09ca8600ea..935ff9bcdfc 100644 --- a/drivers/media/radio/radio-sf16fmr2.c +++ b/drivers/media/radio/radio-sf16fmr2.c @@ -233,7 +233,7 @@ static int vidioc_g_tuner(struct file *file, void *priv, mult = (fmr2->flags & V4L2_TUNER_CAP_LOW) ? 1 : 1000; v->rangelow = RSF16_MINFREQ / mult; v->rangehigh = RSF16_MAXFREQ / mult; - v->rxsubchans = V4L2_TUNER_SUB_MONO | V4L2_TUNER_MODE_STEREO; + v->rxsubchans = V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_STEREO; v->capability = fmr2->flags&V4L2_TUNER_CAP_LOW; v->audmode = fmr2->stereo ? V4L2_TUNER_MODE_STEREO: V4L2_TUNER_MODE_MONO; diff --git a/drivers/media/video/cafe_ccic.c b/drivers/media/video/cafe_ccic.c index 5f582726985..c4d181dde1c 100644 --- a/drivers/media/video/cafe_ccic.c +++ b/drivers/media/video/cafe_ccic.c @@ -774,6 +774,7 @@ static int cafe_cam_init(struct cafe_camera *cam) ret = __cafe_cam_reset(cam); if (ret) goto out; + chip.ident = V4L2_IDENT_NONE; chip.match.type = V4L2_CHIP_MATCH_I2C_ADDR; chip.match.addr = cam->sensor_addr; ret = sensor_call(cam, core, g_chip_ident, &chip); diff --git a/drivers/media/video/cx23885/cx23885-dvb.c b/drivers/media/video/cx23885/cx23885-dvb.c index 0c49a98213c..1dc070da865 100644 --- a/drivers/media/video/cx23885/cx23885-dvb.c +++ b/drivers/media/video/cx23885/cx23885-dvb.c @@ -472,7 +472,7 @@ static int dvb_register(struct cx23885_tsport *port) static struct xc2028_ctrl ctl = { .fname = XC2028_DEFAULT_FIRMWARE, .max_len = 64, - .scode_table = XC3028_FE_OREN538, + .demod = XC3028_FE_OREN538, }; fe = dvb_attach(xc2028_attach, diff --git a/drivers/media/video/ivtv/ivtv-driver.c b/drivers/media/video/ivtv/ivtv-driver.c index b0195e8ee4d..db2ac9a99ac 100644 --- a/drivers/media/video/ivtv/ivtv-driver.c +++ b/drivers/media/video/ivtv/ivtv-driver.c @@ -305,14 +305,17 @@ int ivtv_waitq(wait_queue_head_t *waitq) /* Generic utility functions */ int ivtv_msleep_timeout(unsigned int msecs, int intr) { - int ret; int timeout = msecs_to_jiffies(msecs); do { set_current_state(intr ?
TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE); timeout = schedule_timeout(timeout); - if (intr && (ret = signal_pending(current))) - return ret; + if (intr) { + int ret = signal_pending(current); + + if (ret) + return ret; + } } while (timeout); return 0; } diff --git a/drivers/media/video/ivtv/ivtv-gpio.c b/drivers/media/video/ivtv/ivtv-gpio.c index ceb05bdcaf6..85ac707228e 100644 --- a/drivers/media/video/ivtv/ivtv-gpio.c +++ b/drivers/media/video/ivtv/ivtv-gpio.c @@ -190,8 +190,8 @@ static int subdev_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt) mask = itv->card->gpio_audio_detect.mask; if (mask == 0 || (read_reg(IVTV_REG_GPIO_IN) & mask)) - vt->rxsubchans = V4L2_TUNER_MODE_STEREO | - V4L2_TUNER_MODE_LANG1 | V4L2_TUNER_MODE_LANG2; + vt->rxsubchans = V4L2_TUNER_SUB_STEREO | + V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2; else vt->rxsubchans = V4L2_TUNER_SUB_MONO; return 0; diff --git a/drivers/media/video/ivtv/ivtv-ioctl.c b/drivers/media/video/ivtv/ivtv-ioctl.c index 4a2d464f055..c342a9fe983 100644 --- a/drivers/media/video/ivtv/ivtv-ioctl.c +++ b/drivers/media/video/ivtv/ivtv-ioctl.c @@ -180,7 +180,7 @@ int ivtv_set_speed(struct ivtv *itv, int speed) /* Wait for any DMA to finish */ prepare_to_wait(&itv->dma_waitq, &wait, TASK_INTERRUPTIBLE); - while (itv->i_flags & IVTV_F_I_DMA) { + while (test_bit(IVTV_F_I_DMA, &itv->i_flags)) { got_sig = signal_pending(current); if (got_sig) break; @@ -1710,7 +1710,8 @@ static int ivtv_decoder_ioctls(struct file *filp, unsigned int cmd, void *arg) we are waiting unlock first and later lock again. */ mutex_unlock(&itv->serialize_lock); prepare_to_wait(&itv->event_waitq, &wait, TASK_INTERRUPTIBLE); - if ((itv->i_flags & (IVTV_F_I_EV_DEC_STOPPED|IVTV_F_I_EV_VSYNC)) == 0) + if (!test_bit(IVTV_F_I_EV_DEC_STOPPED, &itv->i_flags) && + !test_bit(IVTV_F_I_EV_VSYNC, &itv->i_flags)) schedule(); finish_wait(&itv->event_waitq, &wait); mutex_lock(&itv->serialize_lock); diff --git a/drivers/media/video/ivtv/ivtv-irq.c b/drivers/media/video/ivtv/ivtv-irq.c index 01c14d2b381..cd9db0bf33b 100644 --- a/drivers/media/video/ivtv/ivtv-irq.c +++ b/drivers/media/video/ivtv/ivtv-irq.c @@ -196,7 +196,7 @@ static int stream_enc_dma_append(struct ivtv_stream *s, u32 data[CX2341X_MBOX_MA bytes_needed, s->name); return -1; } - if (rc && !s->buffers_stolen && (s->s_flags & IVTV_F_S_APPL_IO)) { + if (rc && !s->buffers_stolen && test_bit(IVTV_F_S_APPL_IO, &s->s_flags)) { IVTV_WARN("All %s stream buffers are full. 
Dropping data.\n", s->name); IVTV_WARN("Cause: the application is not reading fast enough.\n"); } diff --git a/drivers/media/video/ivtv/ivtv-queue.c b/drivers/media/video/ivtv/ivtv-queue.c index ff7b7deded4..7fde36e6d22 100644 --- a/drivers/media/video/ivtv/ivtv-queue.c +++ b/drivers/media/video/ivtv/ivtv-queue.c @@ -230,7 +230,8 @@ int ivtv_stream_alloc(struct ivtv_stream *s) return -ENOMEM; } if (ivtv_might_use_dma(s)) { - s->sg_handle = pci_map_single(itv->pdev, s->sg_dma, sizeof(struct ivtv_sg_element), s->dma); + s->sg_handle = pci_map_single(itv->pdev, s->sg_dma, + sizeof(struct ivtv_sg_element), PCI_DMA_TODEVICE); ivtv_stream_sync_for_cpu(s); } diff --git a/drivers/media/video/ivtv/ivtv-yuv.c b/drivers/media/video/ivtv/ivtv-yuv.c index 7912ed6b72e..c0875378acc 100644 --- a/drivers/media/video/ivtv/ivtv-yuv.c +++ b/drivers/media/video/ivtv/ivtv-yuv.c @@ -1063,7 +1063,8 @@ static int ivtv_yuv_udma_frame(struct ivtv *itv, struct ivtv_dma_frame *args) prepare_to_wait(&itv->dma_waitq, &wait, TASK_INTERRUPTIBLE); /* if no UDMA is pending and no UDMA is in progress, then the DMA is finished */ - while (itv->i_flags & (IVTV_F_I_UDMA_PENDING | IVTV_F_I_UDMA)) { + while (test_bit(IVTV_F_I_UDMA_PENDING, &itv->i_flags) || + test_bit(IVTV_F_I_UDMA, &itv->i_flags)) { /* don't interrupt if the DMA is in progress but break off a still pending DMA. */ got_sig = signal_pending(current); diff --git a/drivers/media/video/ivtv/ivtvfb.c b/drivers/media/video/ivtv/ivtvfb.c index 66e6eb51307..fa6bb85cb4b 100644 --- a/drivers/media/video/ivtv/ivtvfb.c +++ b/drivers/media/video/ivtv/ivtvfb.c @@ -298,7 +298,8 @@ static int ivtvfb_prep_dec_dma_to_device(struct ivtv *itv, prepare_to_wait(&itv->dma_waitq, &wait, TASK_INTERRUPTIBLE); /* if no UDMA is pending and no UDMA is in progress, then the DMA is finished */ - while (itv->i_flags & (IVTV_F_I_UDMA_PENDING | IVTV_F_I_UDMA)) { + while (test_bit(IVTV_F_I_UDMA_PENDING, &itv->i_flags) || + test_bit(IVTV_F_I_UDMA, &itv->i_flags)) { /* don't interrupt if the DMA is in progress but break off a still pending DMA. */ got_sig = signal_pending(current); diff --git a/drivers/media/video/uvc/uvc_driver.c b/drivers/media/video/uvc/uvc_driver.c index 399412d7f02..507dc85646b 100644 --- a/drivers/media/video/uvc/uvc_driver.c +++ b/drivers/media/video/uvc/uvc_driver.c @@ -1726,14 +1726,17 @@ static int uvc_suspend(struct usb_interface *intf, pm_message_t message) static int __uvc_resume(struct usb_interface *intf, int reset) { struct uvc_device *dev = usb_get_intfdata(intf); - int ret; uvc_trace(UVC_TRACE_SUSPEND, "Resuming interface %u\n", intf->cur_altsetting->desc.bInterfaceNumber); if (intf->cur_altsetting->desc.bInterfaceSubClass == SC_VIDEOCONTROL) { - if (reset && (ret = uvc_ctrl_resume_device(dev)) < 0) - return ret; + if (reset) { + int ret = uvc_ctrl_resume_device(dev); + + if (ret < 0) + return ret; + } return uvc_status_resume(dev); } diff --git a/drivers/media/video/uvc/uvc_video.c b/drivers/media/video/uvc/uvc_video.c index a95e17329c5..6ce974d7362 100644 --- a/drivers/media/video/uvc/uvc_video.c +++ b/drivers/media/video/uvc/uvc_video.c @@ -742,7 +742,7 @@ static int uvc_alloc_urb_buffers(struct uvc_video_device *video, /* Buffers are already allocated, bail out. */ if (video->urb_size) - return 0; + return video->urb_size / psize; /* Compute the number of packets. Bulk endpoints might transfer UVC * payloads accross multiple URBs. 
diff --git a/drivers/media/video/v4l2-ioctl.c b/drivers/media/video/v4l2-ioctl.c index 88f10d6cbc9..be64a502ea2 100644 --- a/drivers/media/video/v4l2-ioctl.c +++ b/drivers/media/video/v4l2-ioctl.c @@ -42,6 +42,12 @@ printk(KERN_DEBUG "%s: " fmt, vfd->name, ## arg);\ } while (0) +/* Zero out the end of the struct pointed to by p. Everthing after, but + * not including, the specified field is cleared. */ +#define CLEAR_AFTER_FIELD(p, field) \ + memset((u8 *)(p) + offsetof(typeof(*(p)), field) + sizeof((p)->field), \ + 0, sizeof(*(p)) - offsetof(typeof(*(p)), field) - sizeof((p)->field)) + struct std_descr { v4l2_std_id std; const char *descr; @@ -544,39 +550,39 @@ static int check_fmt(const struct v4l2_ioctl_ops *ops, enum v4l2_buf_type type) switch (type) { case V4L2_BUF_TYPE_VIDEO_CAPTURE: - if (ops->vidioc_try_fmt_vid_cap) + if (ops->vidioc_g_fmt_vid_cap) return 0; break; case V4L2_BUF_TYPE_VIDEO_OVERLAY: - if (ops->vidioc_try_fmt_vid_overlay) + if (ops->vidioc_g_fmt_vid_overlay) return 0; break; case V4L2_BUF_TYPE_VIDEO_OUTPUT: - if (ops->vidioc_try_fmt_vid_out) + if (ops->vidioc_g_fmt_vid_out) return 0; break; case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: - if (ops->vidioc_try_fmt_vid_out_overlay) + if (ops->vidioc_g_fmt_vid_out_overlay) return 0; break; case V4L2_BUF_TYPE_VBI_CAPTURE: - if (ops->vidioc_try_fmt_vbi_cap) + if (ops->vidioc_g_fmt_vbi_cap) return 0; break; case V4L2_BUF_TYPE_VBI_OUTPUT: - if (ops->vidioc_try_fmt_vbi_out) + if (ops->vidioc_g_fmt_vbi_out) return 0; break; case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE: - if (ops->vidioc_try_fmt_sliced_vbi_cap) + if (ops->vidioc_g_fmt_sliced_vbi_cap) return 0; break; case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT: - if (ops->vidioc_try_fmt_sliced_vbi_out) + if (ops->vidioc_g_fmt_sliced_vbi_out) return 0; break; case V4L2_BUF_TYPE_PRIVATE: - if (ops->vidioc_try_fmt_type_private) + if (ops->vidioc_g_fmt_type_private) return 0; break; } @@ -782,44 +788,53 @@ static long __video_do_ioctl(struct file *file, switch (f->type) { case V4L2_BUF_TYPE_VIDEO_CAPTURE: + CLEAR_AFTER_FIELD(f, fmt.pix); v4l_print_pix_fmt(vfd, &f->fmt.pix); if (ops->vidioc_s_fmt_vid_cap) ret = ops->vidioc_s_fmt_vid_cap(file, fh, f); break; case V4L2_BUF_TYPE_VIDEO_OVERLAY: + CLEAR_AFTER_FIELD(f, fmt.win); if (ops->vidioc_s_fmt_vid_overlay) ret = ops->vidioc_s_fmt_vid_overlay(file, fh, f); break; case V4L2_BUF_TYPE_VIDEO_OUTPUT: + CLEAR_AFTER_FIELD(f, fmt.pix); v4l_print_pix_fmt(vfd, &f->fmt.pix); if (ops->vidioc_s_fmt_vid_out) ret = ops->vidioc_s_fmt_vid_out(file, fh, f); break; case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: + CLEAR_AFTER_FIELD(f, fmt.win); if (ops->vidioc_s_fmt_vid_out_overlay) ret = ops->vidioc_s_fmt_vid_out_overlay(file, fh, f); break; case V4L2_BUF_TYPE_VBI_CAPTURE: + CLEAR_AFTER_FIELD(f, fmt.vbi); if (ops->vidioc_s_fmt_vbi_cap) ret = ops->vidioc_s_fmt_vbi_cap(file, fh, f); break; case V4L2_BUF_TYPE_VBI_OUTPUT: + CLEAR_AFTER_FIELD(f, fmt.vbi); if (ops->vidioc_s_fmt_vbi_out) ret = ops->vidioc_s_fmt_vbi_out(file, fh, f); break; case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE: + CLEAR_AFTER_FIELD(f, fmt.sliced); if (ops->vidioc_s_fmt_sliced_vbi_cap) ret = ops->vidioc_s_fmt_sliced_vbi_cap(file, fh, f); break; case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT: + CLEAR_AFTER_FIELD(f, fmt.sliced); if (ops->vidioc_s_fmt_sliced_vbi_out) ret = ops->vidioc_s_fmt_sliced_vbi_out(file, fh, f); break; case V4L2_BUF_TYPE_PRIVATE: + /* CLEAR_AFTER_FIELD(f, fmt.raw_data); <- does nothing */ if (ops->vidioc_s_fmt_type_private) ret = ops->vidioc_s_fmt_type_private(file, fh, f); @@ -836,46 +851,55 @@ static long 
__video_do_ioctl(struct file *file, v4l2_type_names)); switch (f->type) { case V4L2_BUF_TYPE_VIDEO_CAPTURE: + CLEAR_AFTER_FIELD(f, fmt.pix); if (ops->vidioc_try_fmt_vid_cap) ret = ops->vidioc_try_fmt_vid_cap(file, fh, f); if (!ret) v4l_print_pix_fmt(vfd, &f->fmt.pix); break; case V4L2_BUF_TYPE_VIDEO_OVERLAY: + CLEAR_AFTER_FIELD(f, fmt.win); if (ops->vidioc_try_fmt_vid_overlay) ret = ops->vidioc_try_fmt_vid_overlay(file, fh, f); break; case V4L2_BUF_TYPE_VIDEO_OUTPUT: + CLEAR_AFTER_FIELD(f, fmt.pix); if (ops->vidioc_try_fmt_vid_out) ret = ops->vidioc_try_fmt_vid_out(file, fh, f); if (!ret) v4l_print_pix_fmt(vfd, &f->fmt.pix); break; case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: + CLEAR_AFTER_FIELD(f, fmt.win); if (ops->vidioc_try_fmt_vid_out_overlay) ret = ops->vidioc_try_fmt_vid_out_overlay(file, fh, f); break; case V4L2_BUF_TYPE_VBI_CAPTURE: + CLEAR_AFTER_FIELD(f, fmt.vbi); if (ops->vidioc_try_fmt_vbi_cap) ret = ops->vidioc_try_fmt_vbi_cap(file, fh, f); break; case V4L2_BUF_TYPE_VBI_OUTPUT: + CLEAR_AFTER_FIELD(f, fmt.vbi); if (ops->vidioc_try_fmt_vbi_out) ret = ops->vidioc_try_fmt_vbi_out(file, fh, f); break; case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE: + CLEAR_AFTER_FIELD(f, fmt.sliced); if (ops->vidioc_try_fmt_sliced_vbi_cap) ret = ops->vidioc_try_fmt_sliced_vbi_cap(file, fh, f); break; case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT: + CLEAR_AFTER_FIELD(f, fmt.sliced); if (ops->vidioc_try_fmt_sliced_vbi_out) ret = ops->vidioc_try_fmt_sliced_vbi_out(file, fh, f); break; case V4L2_BUF_TYPE_PRIVATE: + /* CLEAR_AFTER_FIELD(f, fmt.raw_data); <- does nothing */ if (ops->vidioc_try_fmt_type_private) ret = ops->vidioc_try_fmt_type_private(file, fh, f); @@ -898,6 +922,9 @@ static long __video_do_ioctl(struct file *file, if (ret) break; + if (p->type < V4L2_BUF_TYPE_PRIVATE) + CLEAR_AFTER_FIELD(p, memory); + ret = ops->vidioc_reqbufs(file, fh, p); dbgarg(cmd, "count=%d, type=%s, memory=%s\n", p->count, diff --git a/drivers/media/video/zoran/zoran_driver.c b/drivers/media/video/zoran/zoran_driver.c index 092333b1c34..643cccaa1aa 100644 --- a/drivers/media/video/zoran/zoran_driver.c +++ b/drivers/media/video/zoran/zoran_driver.c @@ -1863,22 +1863,20 @@ static int zoran_querycap(struct file *file, void *__fh, struct v4l2_capability static int zoran_enum_fmt(struct zoran *zr, struct v4l2_fmtdesc *fmt, int flag) { - int num = -1, i; + unsigned int num, i; - for (i = 0; i < NUM_FORMATS; i++) { - if (zoran_formats[i].flags & flag) - num++; - if (num == fmt->index) - break; + for (num = i = 0; i < NUM_FORMATS; i++) { + if (zoran_formats[i].flags & flag && num++ == fmt->index) { + strncpy(fmt->description, zoran_formats[i].name, + sizeof(fmt->description) - 1); + /* fmt struct pre-zeroed, so adding '\0' not neeed */ + fmt->pixelformat = zoran_formats[i].fourcc; + if (zoran_formats[i].flags & ZORAN_FORMAT_COMPRESSED) + fmt->flags |= V4L2_FMT_FLAG_COMPRESSED; + return 0; + } } - if (fmt->index < 0 /* late, but not too late */ || i == NUM_FORMATS) - return -EINVAL; - - strncpy(fmt->description, zoran_formats[i].name, sizeof(fmt->description)-1); - fmt->pixelformat = zoran_formats[i].fourcc; - if (zoran_formats[i].flags & ZORAN_FORMAT_COMPRESSED) - fmt->flags |= V4L2_FMT_FLAG_COMPRESSED; - return 0; + return -EINVAL; } static int zoran_enum_fmt_vid_cap(struct file *file, void *__fh, diff --git a/drivers/mfd/pcf50633-core.c b/drivers/mfd/pcf50633-core.c index 7793932a513..11a6248cc1c 100644 --- a/drivers/mfd/pcf50633-core.c +++ b/drivers/mfd/pcf50633-core.c @@ -443,7 +443,7 @@ static irqreturn_t pcf50633_irq(int irq, void *data) 
dev_dbg(pcf->dev, "pcf50633_irq\n"); get_device(pcf->dev); - disable_irq(pcf->irq); + disable_irq_nosync(pcf->irq); schedule_work(&pcf->irq_work); return IRQ_HANDLED; diff --git a/drivers/mfd/wm8350-core.c b/drivers/mfd/wm8350-core.c index c2be3088e2e..fe24079387c 100644 --- a/drivers/mfd/wm8350-core.c +++ b/drivers/mfd/wm8350-core.c @@ -79,10 +79,6 @@ static int wm8350_phys_read(struct wm8350 *wm8350, u8 reg, int num_regs, /* Cache is CPU endian */ dest[i - reg] = be16_to_cpu(dest[i - reg]); - /* Satisfy non-volatile bits from cache */ - dest[i - reg] &= wm8350_reg_io_map[i].vol; - dest[i - reg] |= wm8350->reg_cache[i]; - /* Mask out non-readable bits */ dest[i - reg] &= wm8350_reg_io_map[i].readable; } @@ -182,9 +178,6 @@ static int wm8350_write(struct wm8350 *wm8350, u8 reg, int num_regs, u16 *src) (wm8350->reg_cache[i] & ~wm8350_reg_io_map[i].writable) | src[i - reg]; - /* Don't store volatile bits */ - wm8350->reg_cache[i] &= ~wm8350_reg_io_map[i].vol; - src[i - reg] = cpu_to_be16(src[i - reg]); } @@ -1261,7 +1254,6 @@ static int wm8350_create_cache(struct wm8350 *wm8350, int type, int mode) (i < WM8350_CLOCK_CONTROL_1 || i > WM8350_AIF_TEST)) { value = be16_to_cpu(wm8350->reg_cache[i]); value &= wm8350_reg_io_map[i].readable; - value &= ~wm8350_reg_io_map[i].vol; wm8350->reg_cache[i] = value; } else wm8350->reg_cache[i] = reg_map[i]; diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c index 3cf61ece71d..348443bdb23 100644 --- a/drivers/misc/enclosure.c +++ b/drivers/misc/enclosure.c @@ -119,7 +119,7 @@ enclosure_register(struct device *dev, const char *name, int components, edev->edev.class = &enclosure_class; edev->edev.parent = get_device(dev); edev->cb = cb; - dev_set_name(&edev->edev, name); + dev_set_name(&edev->edev, "%s", name); err = device_register(&edev->edev); if (err) goto err; @@ -255,8 +255,8 @@ enclosure_component_register(struct enclosure_device *edev, ecomp->number = number; cdev = &ecomp->cdev; cdev->parent = get_device(&edev->edev); - if (name) - dev_set_name(cdev, name); + if (name && name[0]) + dev_set_name(cdev, "%s", name); else dev_set_name(cdev, "%u", number); diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c index 36875dcfa49..7d4febdab28 100644 --- a/drivers/mmc/host/mmci.c +++ b/drivers/mmc/host/mmci.c @@ -490,7 +490,7 @@ static void mmci_check_status(unsigned long data) mod_timer(&host->timer, jiffies + HZ); } -static int __devinit mmci_probe(struct amba_device *dev, void *id) +static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id) { struct mmc_platform_data *plat = dev->dev.platform_data; struct mmci_host *host; diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c index c643d0fe118..b56d72ff06e 100644 --- a/drivers/mmc/host/mvsdio.c +++ b/drivers/mmc/host/mvsdio.c @@ -64,6 +64,31 @@ static int mvsd_setup_data(struct mvsd_host *host, struct mmc_data *data) unsigned int tmout; int tmout_index; + /* + * Hardware weirdness. The FIFO_EMPTY bit of the HW_STATE + * register is sometimes not set before a while when some + * "unusual" data block sizes are used (such as with the SWITCH + * command), even despite the fact that the XFER_DONE interrupt + * was raised. And if another data transfer starts before + * this bit comes to good sense (which eventually happens by + * itself) then the new transfer simply fails with a timeout. 
+ */ + if (!(mvsd_read(MVSD_HW_STATE) & (1 << 13))) { + unsigned long t = jiffies + HZ; + unsigned int hw_state, count = 0; + do { + if (time_after(jiffies, t)) { + dev_warn(host->dev, "FIFO_EMPTY bit missing\n"); + break; + } + hw_state = mvsd_read(MVSD_HW_STATE); + count++; + } while (!(hw_state & (1 << 13))); + dev_dbg(host->dev, "*** wait for FIFO_EMPTY bit " + "(hw=0x%04x, count=%d, jiffies=%ld)\n", + hw_state, count, jiffies - (t - HZ)); + } + /* If timeout=0 then maximum timeout index is used. */ tmout = DIV_ROUND_UP(data->timeout_ns, host->ns_per_clk); tmout += data->timeout_clks; @@ -620,9 +645,18 @@ static void mvsd_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) if (ios->bus_width == MMC_BUS_WIDTH_4) ctrl_reg |= MVSD_HOST_CTRL_DATA_WIDTH_4_BITS; + /* + * The HI_SPEED_EN bit is causing trouble with many (but not all) + * high speed SD, SDHC and SDIO cards. Not enabling that bit + * makes all cards work. So let's just ignore that bit for now + * and revisit this issue if problems for not enabling this bit + * are ever reported. + */ +#if 0 if (ios->timing == MMC_TIMING_MMC_HS || ios->timing == MMC_TIMING_SD_HS) ctrl_reg |= MVSD_HOST_CTRL_HI_SPEED_EN; +#endif host->ctrl = ctrl_reg; mvsd_write(MVSD_HOST_CTRL, ctrl_reg); @@ -882,3 +916,4 @@ module_param(nodma, int, 0); MODULE_AUTHOR("Maen Suleiman, Nicolas Pitre"); MODULE_DESCRIPTION("Marvell MMC,SD,SDIO Host Controller driver"); MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:mvsdio"); diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c index b4a615c55f2..f4cbe473670 100644 --- a/drivers/mmc/host/mxcmmc.c +++ b/drivers/mmc/host/mxcmmc.c @@ -140,6 +140,8 @@ struct mxcmci_host { struct work_struct datawork; }; +static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios); + static inline int mxcmci_use_dma(struct mxcmci_host *host) { return host->do_dma; @@ -160,7 +162,7 @@ static void mxcmci_softreset(struct mxcmci_host *host) writew(0xff, host->base + MMC_REG_RES_TO); } -static void mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data) +static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data) { unsigned int nob = data->blocks; unsigned int blksz = data->blksz; @@ -168,6 +170,7 @@ static void mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data) #ifdef HAS_DMA struct scatterlist *sg; int i; + int ret; #endif if (data->flags & MMC_DATA_STREAM) nob = 0xffff; @@ -183,7 +186,7 @@ static void mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data) for_each_sg(data->sg, sg, data->sg_len, i) { if (sg->offset & 3 || sg->length & 3) { host->do_dma = 0; - return; + return 0; } } @@ -192,23 +195,30 @@ static void mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data) host->dma_nents = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, host->dma_dir); - imx_dma_setup_sg(host->dma, data->sg, host->dma_nents, datasize, - host->res->start + MMC_REG_BUFFER_ACCESS, - DMA_MODE_READ); + ret = imx_dma_setup_sg(host->dma, data->sg, host->dma_nents, + datasize, + host->res->start + MMC_REG_BUFFER_ACCESS, + DMA_MODE_READ); } else { host->dma_dir = DMA_TO_DEVICE; host->dma_nents = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, host->dma_dir); - imx_dma_setup_sg(host->dma, data->sg, host->dma_nents, datasize, - host->res->start + MMC_REG_BUFFER_ACCESS, - DMA_MODE_WRITE); + ret = imx_dma_setup_sg(host->dma, data->sg, host->dma_nents, + datasize, + host->res->start + MMC_REG_BUFFER_ACCESS, + DMA_MODE_WRITE); } + if (ret) { + 
dev_err(mmc_dev(host->mmc), "failed to setup DMA : %d\n", ret); + return ret; + } wmb(); imx_dma_enable(host->dma); #endif /* HAS_DMA */ + return 0; } static int mxcmci_start_cmd(struct mxcmci_host *host, struct mmc_command *cmd, @@ -345,8 +355,11 @@ static int mxcmci_poll_status(struct mxcmci_host *host, u32 mask) stat = readl(host->base + MMC_REG_STATUS); if (stat & STATUS_ERR_MASK) return stat; - if (time_after(jiffies, timeout)) + if (time_after(jiffies, timeout)) { + mxcmci_softreset(host); + mxcmci_set_clk_rate(host, host->clock); return STATUS_TIME_OUT_READ; + } if (stat & mask) return 0; cpu_relax(); @@ -531,6 +544,7 @@ static void mxcmci_request(struct mmc_host *mmc, struct mmc_request *req) { struct mxcmci_host *host = mmc_priv(mmc); unsigned int cmdat = host->cmdat; + int error; WARN_ON(host->req != NULL); @@ -540,7 +554,12 @@ static void mxcmci_request(struct mmc_host *mmc, struct mmc_request *req) host->do_dma = 1; #endif if (req->data) { - mxcmci_setup_data(host, req->data); + error = mxcmci_setup_data(host, req->data); + if (error) { + req->cmd->error = error; + goto out; + } + cmdat |= CMD_DAT_CONT_DATA_ENABLE; @@ -548,7 +567,9 @@ static void mxcmci_request(struct mmc_host *mmc, struct mmc_request *req) cmdat |= CMD_DAT_CONT_WRITE; } - if (mxcmci_start_cmd(host, req->cmd, cmdat)) + error = mxcmci_start_cmd(host, req->cmd, cmdat); +out: + if (error) mxcmci_finish_request(host, req); } @@ -724,7 +745,9 @@ static int mxcmci_probe(struct platform_device *pdev) goto out_clk_put; } - mmc->f_min = clk_get_rate(host->clk) >> 7; + mmc->f_min = clk_get_rate(host->clk) >> 16; + if (mmc->f_min < 400000) + mmc->f_min = 400000; mmc->f_max = clk_get_rate(host->clk) >> 1; /* recommended in data sheet */ diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c index bfa25c01c87..dceb5ee3bda 100644 --- a/drivers/mmc/host/omap.c +++ b/drivers/mmc/host/omap.c @@ -822,7 +822,7 @@ static irqreturn_t mmc_omap_irq(int irq, void *dev_id) del_timer(&host->cmd_abort_timer); host->abort = 1; OMAP_MMC_WRITE(host, IE, 0); - disable_irq(host->irq); + disable_irq_nosync(host->irq); schedule_work(&host->cmd_abort_work); return IRQ_HANDLED; } diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c index e62a22a7f00..c40cb96255a 100644 --- a/drivers/mmc/host/omap_hsmmc.c +++ b/drivers/mmc/host/omap_hsmmc.c @@ -680,7 +680,7 @@ static void mmc_omap_dma_cb(int lch, u16 ch_status, void *data) host->dma_ch = -1; /* * DMA Callback: run in interrupt context. - * mutex_unlock will through a kernel warning if used. + * mutex_unlock will throw a kernel warning if used. 
*/ up(&host->sem); } diff --git a/drivers/mmc/host/sdhci-of.c b/drivers/mmc/host/sdhci-of.c index 3ff4ac3abe8..128c614d11a 100644 --- a/drivers/mmc/host/sdhci-of.c +++ b/drivers/mmc/host/sdhci-of.c @@ -55,7 +55,13 @@ static u32 esdhc_readl(struct sdhci_host *host, int reg) static u16 esdhc_readw(struct sdhci_host *host, int reg) { - return in_be16(host->ioaddr + (reg ^ 0x2)); + u16 ret; + + if (unlikely(reg == SDHCI_HOST_VERSION)) + ret = in_be16(host->ioaddr + reg); + else + ret = in_be16(host->ioaddr + (reg ^ 0x2)); + return ret; } static u8 esdhc_readb(struct sdhci_host *host, int reg) @@ -277,6 +283,7 @@ static int __devexit sdhci_of_remove(struct of_device *ofdev) static const struct of_device_id sdhci_of_match[] = { { .compatible = "fsl,mpc8379-esdhc", .data = &sdhci_esdhc, }, { .compatible = "fsl,mpc8536-esdhc", .data = &sdhci_esdhc, }, + { .compatible = "fsl,esdhc", .data = &sdhci_esdhc, }, { .compatible = "generic-sdhci", }, {}, }; diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c index 62dee54af0a..43976aa4dbb 100644 --- a/drivers/mtd/devices/mtd_dataflash.c +++ b/drivers/mtd/devices/mtd_dataflash.c @@ -178,7 +178,7 @@ static int dataflash_erase(struct mtd_info *mtd, struct erase_info *instr) /* Calculate flash page address; use block erase (for speed) if * we're at a block boundary and need to erase the whole block. */ - pageaddr = div_u64(instr->len, priv->page_size); + pageaddr = div_u64(instr->addr, priv->page_size); do_block = (pageaddr & 0x7) == 0 && instr->len >= blocksize; pageaddr = pageaddr << priv->page_offset; diff --git a/drivers/mtd/mtdsuper.c b/drivers/mtd/mtdsuper.c index 92285d0089c..af8b42e0a55 100644 --- a/drivers/mtd/mtdsuper.c +++ b/drivers/mtd/mtdsuper.c @@ -74,8 +74,7 @@ static int get_sb_mtd_aux(struct file_system_type *fs_type, int flags, ret = fill_super(sb, data, flags & MS_SILENT ? 
1 : 0); if (ret < 0) { - up_write(&sb->s_umount); - deactivate_super(sb); + deactivate_locked_super(sb); return ret; } diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c index 0119220de7d..02700f769b8 100644 --- a/drivers/mtd/nand/davinci_nand.c +++ b/drivers/mtd/nand/davinci_nand.c @@ -407,16 +407,17 @@ static int __init nand_davinci_probe(struct platform_device *pdev) } info->chip.ecc.mode = ecc_mode; - info->clk = clk_get(&pdev->dev, "AEMIFCLK"); + info->clk = clk_get(&pdev->dev, "aemif"); if (IS_ERR(info->clk)) { ret = PTR_ERR(info->clk); - dev_dbg(&pdev->dev, "unable to get AEMIFCLK, err %d\n", ret); + dev_dbg(&pdev->dev, "unable to get AEMIF clock, err %d\n", ret); goto err_clk; } ret = clk_enable(info->clk); if (ret < 0) { - dev_dbg(&pdev->dev, "unable to enable AEMIFCLK, err %d\n", ret); + dev_dbg(&pdev->dev, "unable to enable AEMIF clock, err %d\n", + ret); goto err_clk_enable; } diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c index f3548d04801..40c26080ecd 100644 --- a/drivers/mtd/nand/mxc_nand.c +++ b/drivers/mtd/nand/mxc_nand.c @@ -831,6 +831,7 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command, break; case NAND_CMD_READID: + host->col_addr = 0; send_read_id(host); break; @@ -867,6 +868,7 @@ static int __init mxcnd_probe(struct platform_device *pdev) mtd->priv = this; mtd->owner = THIS_MODULE; mtd->dev.parent = &pdev->dev; + mtd->name = "mxc_nand"; /* 50 us command delay time */ this->chip_delay = 5; @@ -882,8 +884,10 @@ static int __init mxcnd_probe(struct platform_device *pdev) this->verify_buf = mxc_nand_verify_buf; host->clk = clk_get(&pdev->dev, "nfc"); - if (IS_ERR(host->clk)) + if (IS_ERR(host->clk)) { + err = PTR_ERR(host->clk); goto eclk; + } clk_enable(host->clk); host->clk_act = 1; @@ -896,7 +900,7 @@ static int __init mxcnd_probe(struct platform_device *pdev) host->regs = ioremap(res->start, res->end - res->start + 1); if (!host->regs) { - err = -EIO; + err = -ENOMEM; goto eres; } @@ -1011,30 +1015,35 @@ static int __devexit mxcnd_remove(struct platform_device *pdev) #ifdef CONFIG_PM static int mxcnd_suspend(struct platform_device *pdev, pm_message_t state) { - struct mtd_info *info = platform_get_drvdata(pdev); + struct mtd_info *mtd = platform_get_drvdata(pdev); + struct nand_chip *nand_chip = mtd->priv; + struct mxc_nand_host *host = nand_chip->priv; int ret = 0; DEBUG(MTD_DEBUG_LEVEL0, "MXC_ND : NAND suspend\n"); - if (info) - ret = info->suspend(info); - - /* Disable the NFC clock */ - clk_disable(nfc_clk); /* FIXME */ + if (mtd) { + ret = mtd->suspend(mtd); + /* Disable the NFC clock */ + clk_disable(host->clk); + } return ret; } static int mxcnd_resume(struct platform_device *pdev) { - struct mtd_info *info = platform_get_drvdata(pdev); + struct mtd_info *mtd = platform_get_drvdata(pdev); + struct nand_chip *nand_chip = mtd->priv; + struct mxc_nand_host *host = nand_chip->priv; int ret = 0; DEBUG(MTD_DEBUG_LEVEL0, "MXC_ND : NAND resume\n"); - /* Enable the NFC clock */ - clk_enable(nfc_clk); /* FIXME */ - if (info) - info->resume(info); + if (mtd) { + /* Enable the NFC clock */ + clk_enable(host->clk); + mtd->resume(mtd); + } return ret; } @@ -1055,13 +1064,7 @@ static struct platform_driver mxcnd_driver = { static int __init mxc_nd_init(void) { - /* Register the device driver structure. 
*/ - pr_info("MXC MTD nand Driver\n"); - if (platform_driver_probe(&mxcnd_driver, mxcnd_probe) != 0) { - printk(KERN_ERR "Driver register failed for mxcnd_driver\n"); - return -ENODEV; - } - return 0; + return platform_driver_probe(&mxcnd_driver, mxcnd_probe); } static void __exit mxc_nd_cleanup(void) diff --git a/drivers/net/3c509.c b/drivers/net/3c509.c index fbb37192199..682aad89708 100644 --- a/drivers/net/3c509.c +++ b/drivers/net/3c509.c @@ -480,9 +480,13 @@ static int pnp_registered; #ifdef CONFIG_EISA static struct eisa_device_id el3_eisa_ids[] = { + { "TCM5090" }, + { "TCM5091" }, { "TCM5092" }, { "TCM5093" }, + { "TCM5094" }, { "TCM5095" }, + { "TCM5098" }, { "" } }; MODULE_DEVICE_TABLE(eisa, el3_eisa_ids); diff --git a/drivers/net/Makefile b/drivers/net/Makefile index 1fc4602a6ff..a1c25cb4669 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -102,7 +102,7 @@ obj-$(CONFIG_HAMACHI) += hamachi.o obj-$(CONFIG_NET) += Space.o loopback.o obj-$(CONFIG_SEEQ8005) += seeq8005.o obj-$(CONFIG_NET_SB1000) += sb1000.o -obj-$(CONFIG_MAC8390) += mac8390.o 8390.o +obj-$(CONFIG_MAC8390) += mac8390.o obj-$(CONFIG_APNE) += apne.o 8390.o obj-$(CONFIG_PCMCIA_PCNET) += 8390.o obj-$(CONFIG_HP100) += hp100.o diff --git a/drivers/net/arm/ixp4xx_eth.c b/drivers/net/arm/ixp4xx_eth.c index 448487e22fa..a740053d3af 100644 --- a/drivers/net/arm/ixp4xx_eth.c +++ b/drivers/net/arm/ixp4xx_eth.c @@ -338,12 +338,12 @@ static int ixp4xx_mdio_register(void) if (cpu_is_ixp43x()) { /* IXP43x lacks NPE-B and uses NPE-C for MII PHY access */ if (!(ixp4xx_read_feature_bits() & IXP4XX_FEATURE_NPEC_ETH)) - return -ENOSYS; + return -ENODEV; mdio_regs = (struct eth_regs __iomem *)IXP4XX_EthC_BASE_VIRT; } else { /* All MII PHY accesses use NPE-B Ethernet registers */ if (!(ixp4xx_read_feature_bits() & IXP4XX_FEATURE_NPEB_ETH0)) - return -ENOSYS; + return -ENODEV; mdio_regs = (struct eth_regs __iomem *)IXP4XX_EthB_BASE_VIRT; } @@ -1174,7 +1174,7 @@ static int __devinit eth_init_one(struct platform_device *pdev) regs_phys = IXP4XX_EthC_BASE_PHYS; break; default: - err = -ENOSYS; + err = -ENODEV; goto err_free; } @@ -1189,15 +1189,10 @@ static int __devinit eth_init_one(struct platform_device *pdev) goto err_free; } - if (register_netdev(dev)) { - err = -EIO; - goto err_npe_rel; - } - port->mem_res = request_mem_region(regs_phys, REGS_SIZE, dev->name); if (!port->mem_res) { err = -EBUSY; - goto err_unreg; + goto err_npe_rel; } port->plat = plat; @@ -1215,20 +1210,25 @@ static int __devinit eth_init_one(struct platform_device *pdev) snprintf(phy_id, BUS_ID_SIZE, PHY_ID_FMT, "0", plat->phy); port->phydev = phy_connect(dev, phy_id, &ixp4xx_adjust_link, 0, PHY_INTERFACE_MODE_MII); - if (IS_ERR(port->phydev)) { - printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name); - return PTR_ERR(port->phydev); - } + if ((err = IS_ERR(port->phydev))) + goto err_free_mem; port->phydev->irq = PHY_POLL; + if ((err = register_netdev(dev))) + goto err_phy_dis; + printk(KERN_INFO "%s: MII PHY %i on %s\n", dev->name, plat->phy, npe_name(port->npe)); return 0; -err_unreg: - unregister_netdev(dev); +err_phy_dis: + phy_disconnect(port->phydev); +err_free_mem: + npe_port_tab[NPE_ID(port->id)] = NULL; + platform_set_drvdata(pdev, NULL); + release_resource(port->mem_res); err_npe_rel: npe_release(port->npe); err_free: @@ -1242,6 +1242,7 @@ static int __devexit eth_remove_one(struct platform_device *pdev) struct port *port = netdev_priv(dev); unregister_netdev(dev); + phy_disconnect(port->phydev); npe_port_tab[NPE_ID(port->id)] = NULL; 
platform_set_drvdata(pdev, NULL); npe_release(port->npe); diff --git a/drivers/net/atl1e/atl1e_main.c b/drivers/net/atl1e/atl1e_main.c index fb57b750866..1342418fb20 100644 --- a/drivers/net/atl1e/atl1e_main.c +++ b/drivers/net/atl1e/atl1e_main.c @@ -37,6 +37,7 @@ char atl1e_driver_version[] = DRV_VERSION; */ static struct pci_device_id atl1e_pci_tbl[] = { {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1E)}, + {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, 0x1066)}, /* required last entry */ { 0 } }; diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c index 0ab22540bf5..4e817126e28 100644 --- a/drivers/net/atlx/atl1.c +++ b/drivers/net/atlx/atl1.c @@ -82,6 +82,12 @@ #include "atl1.h" +#define ATLX_DRIVER_VERSION "2.1.3" +MODULE_AUTHOR("Xiong Huang , \ + Chris Snook , Jay Cliburn "); +MODULE_LICENSE("GPL"); +MODULE_VERSION(ATLX_DRIVER_VERSION); + /* Temporary hack for merging atl1 and atl2 */ #include "atlx.c" diff --git a/drivers/net/atlx/atlx.h b/drivers/net/atlx/atlx.h index 297a03da6b7..14054b75aa6 100644 --- a/drivers/net/atlx/atlx.h +++ b/drivers/net/atlx/atlx.h @@ -29,12 +29,6 @@ #include #include -#define ATLX_DRIVER_VERSION "2.1.3" -MODULE_AUTHOR("Xiong Huang , \ - Chris Snook , Jay Cliburn "); -MODULE_LICENSE("GPL"); -MODULE_VERSION(ATLX_DRIVER_VERSION); - #define ATLX_ERR_PHY 2 #define ATLX_ERR_PHY_SPEED 7 #define ATLX_ERR_PHY_RES 8 diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h index c49ddd08b2a..b4bb06fdf30 100644 --- a/drivers/net/benet/be.h +++ b/drivers/net/benet/be.h @@ -35,8 +35,22 @@ #define DRV_VER "2.0.348" #define DRV_NAME "be2net" #define BE_NAME "ServerEngines BladeEngine2 10Gbps NIC" +#define OC_NAME "Emulex OneConnect 10Gbps NIC" #define DRV_DESC BE_NAME "Driver" +#define BE_VENDOR_ID 0x19a2 +#define BE_DEVICE_ID1 0x211 +#define OC_DEVICE_ID1 0x700 +#define OC_DEVICE_ID2 0x701 + +static inline char *nic_name(struct pci_dev *pdev) +{ + if (pdev->device == OC_DEVICE_ID1 || pdev->device == OC_DEVICE_ID2) + return OC_NAME; + else + return BE_NAME; +} + /* Number of bytes of an RX frame that are copied to skb->data */ #define BE_HDR_LEN 64 #define BE_MAX_JUMBO_FRAME_SIZE 9018 diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c index 30d0c81c989..5c378b5e8e4 100644 --- a/drivers/net/benet/be_main.c +++ b/drivers/net/benet/be_main.c @@ -28,10 +28,10 @@ static unsigned int rx_frag_size = 2048; module_param(rx_frag_size, uint, S_IRUGO); MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data."); -#define BE_VENDOR_ID 0x19a2 -#define BE2_DEVICE_ID_1 0x0211 static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = { - { PCI_DEVICE(BE_VENDOR_ID, BE2_DEVICE_ID_1) }, + { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) }, + { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) }, + { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) }, { 0 } }; MODULE_DEVICE_TABLE(pci, be_dev_ids); @@ -1859,7 +1859,7 @@ static int __devinit be_probe(struct pci_dev *pdev, if (status != 0) goto stats_clean; - dev_info(&pdev->dev, BE_NAME " port %d\n", adapter->port_num); + dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num); return 0; stats_clean: @@ -1873,7 +1873,7 @@ rel_reg: disable_dev: pci_disable_device(pdev); do_none: - dev_warn(&pdev->dev, BE_NAME " initialization failed\n"); + dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev)); return status; } diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c index 9f971ed6b58..b4da1821332 100644 --- a/drivers/net/bfin_mac.c +++ b/drivers/net/bfin_mac.c @@ -979,22 +979,7 @@ static int 
bfin_mac_open(struct net_device *dev) return 0; } -static const struct net_device_ops bfin_mac_netdev_ops = { - .ndo_open = bfin_mac_open, - .ndo_stop = bfin_mac_close, - .ndo_start_xmit = bfin_mac_hard_start_xmit, - .ndo_set_mac_address = bfin_mac_set_mac_address, - .ndo_tx_timeout = bfin_mac_timeout, - .ndo_set_multicast_list = bfin_mac_set_multicast_list, - .ndo_validate_addr = eth_validate_addr, - .ndo_change_mtu = eth_change_mtu, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = bfin_mac_poll, -#endif -}; - /* - * * this makes the board clean up everything that it can * and not talk to the outside world. Caused by * an 'ifconfig ethX down' @@ -1019,6 +1004,20 @@ static int bfin_mac_close(struct net_device *dev) return 0; } +static const struct net_device_ops bfin_mac_netdev_ops = { + .ndo_open = bfin_mac_open, + .ndo_stop = bfin_mac_close, + .ndo_start_xmit = bfin_mac_hard_start_xmit, + .ndo_set_mac_address = bfin_mac_set_mac_address, + .ndo_tx_timeout = bfin_mac_timeout, + .ndo_set_multicast_list = bfin_mac_set_multicast_list, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = eth_change_mtu, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = bfin_mac_poll, +#endif +}; + static int __devinit bfin_mac_probe(struct platform_device *pdev) { struct net_device *ndev; diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c index d47839184a0..b0cb29d4cc0 100644 --- a/drivers/net/bnx2.c +++ b/drivers/net/bnx2.c @@ -54,8 +54,8 @@ #define DRV_MODULE_NAME "bnx2" #define PFX DRV_MODULE_NAME ": " -#define DRV_MODULE_VERSION "2.0.0" -#define DRV_MODULE_RELDATE "April 2, 2009" +#define DRV_MODULE_VERSION "2.0.1" +#define DRV_MODULE_RELDATE "May 6, 2009" #define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-4.6.16.fw" #define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-4.6.16.fw" #define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-4.6.17.fw" @@ -2600,6 +2600,7 @@ bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi) /* Tell compiler that status block fields can change. */ barrier(); cons = *bnapi->hw_tx_cons_ptr; + barrier(); if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT)) cons++; return cons; @@ -2879,6 +2880,7 @@ bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi) /* Tell compiler that status block fields can change. */ barrier(); cons = *bnapi->hw_rx_cons_ptr; + barrier(); if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)) cons++; return cons; diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index 8c2e5ab51f0..faf094abef7 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c @@ -1465,6 +1465,12 @@ static struct aggregator *ad_agg_selection_test(struct aggregator *best, return best; } +static int agg_device_up(const struct aggregator *agg) +{ + return (netif_running(agg->slave->dev) && + netif_carrier_ok(agg->slave->dev)); +} + /** * ad_agg_selection_logic - select an aggregation group for a team * @aggregator: the aggregator we're looking at @@ -1496,14 +1502,13 @@ static void ad_agg_selection_logic(struct aggregator *agg) struct port *port; origin = agg; - active = __get_active_agg(agg); - best = active; + best = (active && agg_device_up(active)) ? 
active : NULL; do { agg->is_active = 0; - if (agg->num_of_ports) + if (agg->num_of_ports && agg_device_up(agg)) best = ad_agg_selection_test(best, agg); } while ((agg = __get_next_agg(agg))); diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 2188a96fc09..74824028f85 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -5181,7 +5181,6 @@ static int __init bonding_init(void) { int i; int res; - struct bonding *bond; printk(KERN_INFO "%s", version); @@ -5212,13 +5211,6 @@ static int __init bonding_init(void) goto out; err: - list_for_each_entry(bond, &bond_dev_list, bond_list) { - bond_work_cancel_all(bond); - destroy_workqueue(bond->wq); - } - - bond_destroy_sysfs(); - rtnl_lock(); bond_free_all(); rtnl_unlock(); diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h index 714df2b675e..c888e97c967 100644 --- a/drivers/net/cxgb3/adapter.h +++ b/drivers/net/cxgb3/adapter.h @@ -85,8 +85,8 @@ struct fl_pg_chunk { struct page *page; void *va; unsigned int offset; - u64 *p_cnt; - DECLARE_PCI_UNMAP_ADDR(mapping); + unsigned long *p_cnt; + dma_addr_t mapping; }; struct rx_desc; diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c index 7ea48414c6c..17858b9a583 100644 --- a/drivers/net/cxgb3/cxgb3_main.c +++ b/drivers/net/cxgb3/cxgb3_main.c @@ -2496,14 +2496,16 @@ static void check_link_status(struct adapter *adapter) for_each_port(adapter, i) { struct net_device *dev = adapter->port[i]; struct port_info *p = netdev_priv(dev); + int link_fault; spin_lock_irq(&adapter->work_lock); - if (p->link_fault) { + link_fault = p->link_fault; + spin_unlock_irq(&adapter->work_lock); + + if (link_fault) { t3_link_fault(adapter, i); - spin_unlock_irq(&adapter->work_lock); continue; } - spin_unlock_irq(&adapter->work_lock); if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev)) { t3_xgm_intr_disable(adapter, i); diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c index 26d3587f339..b3ee2bc1a00 100644 --- a/drivers/net/cxgb3/sge.c +++ b/drivers/net/cxgb3/sge.c @@ -355,7 +355,7 @@ static void clear_rx_desc(struct pci_dev *pdev, const struct sge_fl *q, (*d->pg_chunk.p_cnt)--; if (!*d->pg_chunk.p_cnt) pci_unmap_page(pdev, - pci_unmap_addr(&d->pg_chunk, mapping), + d->pg_chunk.mapping, q->alloc_size, PCI_DMA_FROMDEVICE); put_page(d->pg_chunk.page); @@ -454,7 +454,7 @@ static int alloc_pg_chunk(struct adapter *adapter, struct sge_fl *q, q->pg_chunk.offset = 0; mapping = pci_map_page(adapter->pdev, q->pg_chunk.page, 0, q->alloc_size, PCI_DMA_FROMDEVICE); - pci_unmap_addr_set(&q->pg_chunk, mapping, mapping); + q->pg_chunk.mapping = mapping; } sd->pg_chunk = q->pg_chunk; @@ -511,8 +511,7 @@ static int refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp) nomem: q->alloc_failed++; break; } - mapping = pci_unmap_addr(&sd->pg_chunk, mapping) + - sd->pg_chunk.offset; + mapping = sd->pg_chunk.mapping + sd->pg_chunk.offset; pci_unmap_addr_set(sd, dma_addr, mapping); add_one_rx_chunk(mapping, d, q->gen); @@ -881,7 +880,7 @@ recycle: (*sd->pg_chunk.p_cnt)--; if (!*sd->pg_chunk.p_cnt) pci_unmap_page(adap->pdev, - pci_unmap_addr(&sd->pg_chunk, mapping), + sd->pg_chunk.mapping, fl->alloc_size, PCI_DMA_FROMDEVICE); if (!skb) { @@ -2096,7 +2095,7 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs, (*sd->pg_chunk.p_cnt)--; if (!*sd->pg_chunk.p_cnt) pci_unmap_page(adap->pdev, - pci_unmap_addr(&sd->pg_chunk, mapping), + sd->pg_chunk.mapping, fl->alloc_size, PCI_DMA_FROMDEVICE); diff 
--git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c index 4f68aeb2679..4950d5d789a 100644 --- a/drivers/net/cxgb3/t3_hw.c +++ b/drivers/net/cxgb3/t3_hw.c @@ -1274,6 +1274,11 @@ void t3_link_fault(struct adapter *adapter, int port_id) A_XGM_INT_STATUS + mac->offset); link_fault &= F_LINKFAULTCHANGE; + link_ok = lc->link_ok; + speed = lc->speed; + duplex = lc->duplex; + fc = lc->fc; + phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc); if (link_fault) { diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c index b1419e21b46..fffb006b7d9 100644 --- a/drivers/net/e1000/e1000_main.c +++ b/drivers/net/e1000/e1000_main.c @@ -4027,8 +4027,9 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, PCI_DMA_FROMDEVICE); length = le16_to_cpu(rx_desc->length); - - if (unlikely(!(status & E1000_RXD_STAT_EOP))) { + /* !EOP means multiple descriptors were used to store a single + * packet, also make sure the frame isn't just CRC only */ + if (unlikely(!(status & E1000_RXD_STAT_EOP) || (length <= 4))) { /* All receives must fit into a single buffer */ E1000_DBG("%s: Receive packet consumed multiple" " buffers\n", netdev->name); diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c index f9a846b1b92..9f6a68fb7b4 100644 --- a/drivers/net/forcedeth.c +++ b/drivers/net/forcedeth.c @@ -897,6 +897,12 @@ enum { }; static int phy_cross = NV_CROSSOVER_DETECTION_DISABLED; +/* + * Power down phy when interface is down (persists through reboot; + * older Linux and other OSes may not power it up again) + */ +static int phy_power_down = 0; + static inline struct fe_priv *get_nvpriv(struct net_device *dev) { return netdev_priv(dev); @@ -1485,7 +1491,10 @@ static int phy_init(struct net_device *dev) /* restart auto negotiation, power down phy */ mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); - mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE | BMCR_PDOWN); + mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE); + if (phy_power_down) { + mii_control |= BMCR_PDOWN; + } if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) { return PHY_ERROR; } @@ -5513,7 +5522,7 @@ static int nv_close(struct net_device *dev) nv_drain_rxtx(dev); - if (np->wolenabled) { + if (np->wolenabled || !phy_power_down) { writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags); nv_start_rx(dev); } else { @@ -6367,6 +6376,8 @@ module_param(dma_64bit, int, 0); MODULE_PARM_DESC(dma_64bit, "High DMA is enabled by setting to 1 and disabled by setting to 0."); module_param(phy_cross, int, 0); MODULE_PARM_DESC(phy_cross, "Phy crossover detection for Realtek 8201 phy is enabled by setting to 1 and disabled by setting to 0."); +module_param(phy_power_down, int, 0); +MODULE_PARM_DESC(phy_power_down, "Power down phy and disable link when interface is down (1), or leave phy powered up (0)."); MODULE_AUTHOR("Manfred Spraul "); MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver"); diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c index b2c49679bba..a0519184e54 100644 --- a/drivers/net/gianfar.c +++ b/drivers/net/gianfar.c @@ -1885,8 +1885,17 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit) if (unlikely(!newskb)) newskb = skb; - else if (skb) + else if (skb) { + /* + * We need to reset ->data to what it + * was before gfar_new_skb() re-aligned + * it to an RXBUF_ALIGNMENT boundary + * before we put the skb back on the + * recycle list. 
+ */ + skb->data = skb->head + NET_SKB_PAD; __skb_queue_head(&priv->rx_recycle, skb); + } } else { /* Increment the number of packets */ dev->stats.rx_packets++; diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h index 0642d52aef5..cf352961ae9 100644 --- a/drivers/net/gianfar.h +++ b/drivers/net/gianfar.h @@ -259,7 +259,7 @@ extern const char gfar_driver_version[]; (IEVENT_RXC | IEVENT_BSY | IEVENT_EBERR | IEVENT_MSRO | \ IEVENT_BABT | IEVENT_TXC | IEVENT_TXE | IEVENT_LC \ | IEVENT_CRL | IEVENT_XFUN | IEVENT_DPE | IEVENT_PERR \ - | IEVENT_MAG) + | IEVENT_MAG | IEVENT_BABR) #define IMASK_INIT_CLEAR 0x00000000 #define IMASK_BABR 0x80000000 diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c index 08c801490c7..e25343588fc 100644 --- a/drivers/net/igb/igb_main.c +++ b/drivers/net/igb/igb_main.c @@ -2006,7 +2006,7 @@ static void igb_setup_rctl(struct igb_adapter *adapter) struct e1000_hw *hw = &adapter->hw; u32 rctl; u32 srrctl = 0; - int i, j; + int i; rctl = rd32(E1000_RCTL); @@ -2071,8 +2071,6 @@ static void igb_setup_rctl(struct igb_adapter *adapter) if (adapter->vfs_allocated_count) { u32 vmolr; - j = adapter->rx_ring[0].reg_idx; - /* set all queue drop enable bits */ wr32(E1000_QDE, ALL_QUEUES); srrctl |= E1000_SRRCTL_DROP_EN; @@ -2080,16 +2078,16 @@ static void igb_setup_rctl(struct igb_adapter *adapter) /* disable queue 0 to prevent tail write w/o re-config */ wr32(E1000_RXDCTL(0), 0); - vmolr = rd32(E1000_VMOLR(j)); + vmolr = rd32(E1000_VMOLR(adapter->vfs_allocated_count)); if (rctl & E1000_RCTL_LPE) vmolr |= E1000_VMOLR_LPE; - if (adapter->num_rx_queues > 0) + if (adapter->num_rx_queues > 1) vmolr |= E1000_VMOLR_RSSE; - wr32(E1000_VMOLR(j), vmolr); + wr32(E1000_VMOLR(adapter->vfs_allocated_count), vmolr); } for (i = 0; i < adapter->num_rx_queues; i++) { - j = adapter->rx_ring[i].reg_idx; + int j = adapter->rx_ring[i].reg_idx; wr32(E1000_SRRCTL(j), srrctl); } diff --git a/drivers/net/mac8390.c b/drivers/net/mac8390.c index 8e884869a05..22e74a0e036 100644 --- a/drivers/net/mac8390.c +++ b/drivers/net/mac8390.c @@ -304,7 +304,7 @@ struct net_device * __init mac8390_probe(int unit) if (!MACH_IS_MAC) return ERR_PTR(-ENODEV); - dev = alloc_ei_netdev(); + dev = ____alloc_ei_netdev(0); if (!dev) return ERR_PTR(-ENOMEM); @@ -481,15 +481,15 @@ void cleanup_module(void) static const struct net_device_ops mac8390_netdev_ops = { .ndo_open = mac8390_open, .ndo_stop = mac8390_close, - .ndo_start_xmit = ei_start_xmit, - .ndo_tx_timeout = ei_tx_timeout, - .ndo_get_stats = ei_get_stats, - .ndo_set_multicast_list = ei_set_multicast_list, + .ndo_start_xmit = __ei_start_xmit, + .ndo_tx_timeout = __ei_tx_timeout, + .ndo_get_stats = __ei_get_stats, + .ndo_set_multicast_list = __ei_set_multicast_list, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, .ndo_change_mtu = eth_change_mtu, #ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = ei_poll, + .ndo_poll_controller = __ei_poll, #endif }; diff --git a/drivers/net/meth.c b/drivers/net/meth.c index aa08987f6e8..dbd3436912b 100644 --- a/drivers/net/meth.c +++ b/drivers/net/meth.c @@ -127,11 +127,11 @@ static unsigned long mdio_read(struct meth_private *priv, unsigned long phyreg) static int mdio_probe(struct meth_private *priv) { int i; - unsigned long p2, p3; + unsigned long p2, p3, flags; /* check if phy is detected already */ if(priv->phy_addr>=0&&priv->phy_addr<32) return 0; - spin_lock(&priv->meth_lock); + spin_lock_irqsave(&priv->meth_lock, flags); for (i=0;i<32;++i){ priv->phy_addr=i; 
p2=mdio_read(priv,2); @@ -157,7 +157,7 @@ static int mdio_probe(struct meth_private *priv) break; } } - spin_unlock(&priv->meth_lock); + spin_unlock_irqrestore(&priv->meth_lock, flags); if(priv->phy_addr<32) { return 0; } @@ -373,14 +373,14 @@ static int meth_release(struct net_device *dev) static void meth_rx(struct net_device* dev, unsigned long int_status) { struct sk_buff *skb; - unsigned long status; + unsigned long status, flags; struct meth_private *priv = netdev_priv(dev); unsigned long fifo_rptr = (int_status & METH_INT_RX_RPTR_MASK) >> 8; - spin_lock(&priv->meth_lock); + spin_lock_irqsave(&priv->meth_lock, flags); priv->dma_ctrl &= ~METH_DMA_RX_INT_EN; mace->eth.dma_ctrl = priv->dma_ctrl; - spin_unlock(&priv->meth_lock); + spin_unlock_irqrestore(&priv->meth_lock, flags); if (int_status & METH_INT_RX_UNDERFLOW) { fifo_rptr = (fifo_rptr - 1) & 0x0f; @@ -452,12 +452,12 @@ static void meth_rx(struct net_device* dev, unsigned long int_status) mace->eth.rx_fifo = priv->rx_ring_dmas[priv->rx_write]; ADVANCE_RX_PTR(priv->rx_write); } - spin_lock(&priv->meth_lock); + spin_lock_irqsave(&priv->meth_lock, flags); /* In case there was underflow, and Rx DMA was disabled */ priv->dma_ctrl |= METH_DMA_RX_INT_EN | METH_DMA_RX_EN; mace->eth.dma_ctrl = priv->dma_ctrl; mace->eth.int_stat = METH_INT_RX_THRESHOLD; - spin_unlock(&priv->meth_lock); + spin_unlock_irqrestore(&priv->meth_lock, flags); } static int meth_tx_full(struct net_device *dev) @@ -470,11 +470,11 @@ static int meth_tx_full(struct net_device *dev) static void meth_tx_cleanup(struct net_device* dev, unsigned long int_status) { struct meth_private *priv = netdev_priv(dev); - unsigned long status; + unsigned long status, flags; struct sk_buff *skb; unsigned long rptr = (int_status&TX_INFO_RPTR) >> 16; - spin_lock(&priv->meth_lock); + spin_lock_irqsave(&priv->meth_lock, flags); /* Stop DMA notification */ priv->dma_ctrl &= ~(METH_DMA_TX_INT_EN); @@ -527,12 +527,13 @@ static void meth_tx_cleanup(struct net_device* dev, unsigned long int_status) } mace->eth.int_stat = METH_INT_TX_EMPTY | METH_INT_TX_PKT; - spin_unlock(&priv->meth_lock); + spin_unlock_irqrestore(&priv->meth_lock, flags); } static void meth_error(struct net_device* dev, unsigned status) { struct meth_private *priv = netdev_priv(dev); + unsigned long flags; printk(KERN_WARNING "meth: error status: 0x%08x\n",status); /* check for errors too... */ @@ -547,7 +548,7 @@ static void meth_error(struct net_device* dev, unsigned status) printk(KERN_WARNING "meth: Rx overflow\n"); if (status & (METH_INT_RX_UNDERFLOW)) { printk(KERN_WARNING "meth: Rx underflow\n"); - spin_lock(&priv->meth_lock); + spin_lock_irqsave(&priv->meth_lock, flags); mace->eth.int_stat = METH_INT_RX_UNDERFLOW; /* more underflow interrupts will be delivered, * effectively throwing us into an infinite loop. 
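/*
 * Editor's note (illustration only, not part of the patch series): the
 * meth.c hunks above convert plain spin_lock()/spin_unlock() on meth_lock
 * to the irqsave variants.  The lock is taken both in process context
 * (e.g. mdio_probe) and on the interrupt path, so holding it with local
 * interrupts enabled risks the same-CPU deadlock where the IRQ handler
 * spins on a lock its own CPU already owns.  A minimal sketch of the safe
 * pattern, using a hypothetical driver structure:
 */
#include <linux/spinlock.h>

struct example_priv {
	spinlock_t lock;
	unsigned long dma_ctrl;		/* state shared with the IRQ handler */
};

static void example_update_dma_ctrl(struct example_priv *priv, unsigned long bits)
{
	unsigned long flags;

	/* mask local interrupts for the critical section */
	spin_lock_irqsave(&priv->lock, flags);
	priv->dma_ctrl |= bits;
	spin_unlock_irqrestore(&priv->lock, flags);
}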
@@ -555,7 +556,7 @@ static void meth_error(struct net_device* dev, unsigned status) priv->dma_ctrl &= ~METH_DMA_RX_EN; mace->eth.dma_ctrl = priv->dma_ctrl; DPRINTK("Disabled meth Rx DMA temporarily\n"); - spin_unlock(&priv->meth_lock); + spin_unlock_irqrestore(&priv->meth_lock, flags); } mace->eth.int_stat = METH_INT_ERROR; } diff --git a/drivers/net/mlx4/en_cq.c b/drivers/net/mlx4/en_cq.c index 91f50de84be..a276125b709 100644 --- a/drivers/net/mlx4/en_cq.c +++ b/drivers/net/mlx4/en_cq.c @@ -125,8 +125,10 @@ void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq) if (cq->is_tx) del_timer(&cq->timer); - else + else { napi_disable(&cq->napi); + netif_napi_del(&cq->napi); + } mlx4_cq_free(mdev->dev, &cq->mcq); } diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c index 7942c4d3cd8..9ee873e872b 100644 --- a/drivers/net/mlx4/en_rx.c +++ b/drivers/net/mlx4/en_rx.c @@ -951,7 +951,6 @@ static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, if (err) { mlx4_err(mdev, "Failed to allocate qp #%d\n", qpn); goto out; - return err; } qp->event = mlx4_en_sqp_event; diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c index ac6fc499b28..e5c98a98ad3 100644 --- a/drivers/net/mlx4/en_tx.c +++ b/drivers/net/mlx4/en_tx.c @@ -426,7 +426,7 @@ void mlx4_en_poll_tx_cq(unsigned long data) INC_PERF_COUNTER(priv->pstats.tx_poll); - if (!spin_trylock(&ring->comp_lock)) { + if (!spin_trylock_irq(&ring->comp_lock)) { mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT); return; } @@ -439,7 +439,7 @@ void mlx4_en_poll_tx_cq(unsigned long data) if (inflight && priv->port_up) mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT); - spin_unlock(&ring->comp_lock); + spin_unlock_irq(&ring->comp_lock); } static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv, @@ -482,9 +482,9 @@ static inline void mlx4_en_xmit_poll(struct mlx4_en_priv *priv, int tx_ind) /* Poll the CQ every mlx4_en_TX_MODER_POLL packets */ if ((++ring->poll_cnt & (MLX4_EN_TX_POLL_MODER - 1)) == 0) - if (spin_trylock(&ring->comp_lock)) { + if (spin_trylock_irq(&ring->comp_lock)) { mlx4_en_process_tx_cq(priv->dev, cq); - spin_unlock(&ring->comp_lock); + spin_unlock_irq(&ring->comp_lock); } } diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c index a400d7115f7..6bb5af35eda 100644 --- a/drivers/net/mv643xx_eth.c +++ b/drivers/net/mv643xx_eth.c @@ -569,7 +569,7 @@ static int rxq_process(struct rx_queue *rxq, int budget) if (rxq->rx_curr_desc == rxq->rx_ring_size) rxq->rx_curr_desc = 0; - dma_unmap_single(NULL, rx_desc->buf_ptr, + dma_unmap_single(mp->dev->dev.parent, rx_desc->buf_ptr, rx_desc->buf_size, DMA_FROM_DEVICE); rxq->rx_desc_count--; rx++; @@ -678,8 +678,9 @@ static int rxq_refill(struct rx_queue *rxq, int budget) rx_desc = rxq->rx_desc_area + rx; - rx_desc->buf_ptr = dma_map_single(NULL, skb->data, - mp->skb_size, DMA_FROM_DEVICE); + rx_desc->buf_ptr = dma_map_single(mp->dev->dev.parent, + skb->data, mp->skb_size, + DMA_FROM_DEVICE); rx_desc->buf_size = mp->skb_size; rxq->rx_skb[rx] = skb; wmb(); @@ -718,6 +719,7 @@ static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb) static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb) { + struct mv643xx_eth_private *mp = txq_to_mp(txq); int nr_frags = skb_shinfo(skb)->nr_frags; int frag; @@ -746,10 +748,10 @@ static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb) desc->l4i_chk = 0; desc->byte_cnt = this_frag->size; - desc->buf_ptr = dma_map_page(NULL, 
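The mlx4_en_deactivate_cq() hunk pairs napi_disable() with netif_napi_del(), so the NAPI context added when the CQ was activated is also removed on deactivation instead of being leaked. A minimal sketch of that add/enable/disable/del lifecycle, with hypothetical structure and poll-routine names:

#include <linux/netdevice.h>

struct demo_rx {
    struct napi_struct napi;
};

static int demo_poll(struct napi_struct *napi, int budget)
{
    int done = 0;

    /* ... process up to 'budget' completions here ... */
    napi_complete(napi);
    return done;
}

static void demo_rx_activate(struct net_device *dev, struct demo_rx *rx)
{
    netif_napi_add(dev, &rx->napi, demo_poll, 64);
    napi_enable(&rx->napi);
}

static void demo_rx_deactivate(struct demo_rx *rx)
{
    napi_disable(&rx->napi);    /* wait for any running poll to finish */
    netif_napi_del(&rx->napi);  /* unhook it from the device again */
}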
this_frag->page, - this_frag->page_offset, - this_frag->size, - DMA_TO_DEVICE); + desc->buf_ptr = dma_map_page(mp->dev->dev.parent, + this_frag->page, + this_frag->page_offset, + this_frag->size, DMA_TO_DEVICE); } } @@ -826,7 +828,8 @@ no_csum: desc->l4i_chk = l4i_chk; desc->byte_cnt = length; - desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE); + desc->buf_ptr = dma_map_single(mp->dev->dev.parent, skb->data, + length, DMA_TO_DEVICE); __skb_queue_tail(&txq->tx_skb, skb); @@ -956,10 +959,10 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force) } if (cmd_sts & TX_FIRST_DESC) { - dma_unmap_single(NULL, desc->buf_ptr, + dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr, desc->byte_cnt, DMA_TO_DEVICE); } else { - dma_unmap_page(NULL, desc->buf_ptr, + dma_unmap_page(mp->dev->dev.parent, desc->buf_ptr, desc->byte_cnt, DMA_TO_DEVICE); } @@ -1894,9 +1897,9 @@ static int rxq_init(struct mv643xx_eth_private *mp, int index) mp->rx_desc_sram_size); rxq->rx_desc_dma = mp->rx_desc_sram_addr; } else { - rxq->rx_desc_area = dma_alloc_coherent(NULL, size, - &rxq->rx_desc_dma, - GFP_KERNEL); + rxq->rx_desc_area = dma_alloc_coherent(mp->dev->dev.parent, + size, &rxq->rx_desc_dma, + GFP_KERNEL); } if (rxq->rx_desc_area == NULL) { @@ -1947,7 +1950,7 @@ out_free: if (index == 0 && size <= mp->rx_desc_sram_size) iounmap(rxq->rx_desc_area); else - dma_free_coherent(NULL, size, + dma_free_coherent(mp->dev->dev.parent, size, rxq->rx_desc_area, rxq->rx_desc_dma); @@ -1979,7 +1982,7 @@ static void rxq_deinit(struct rx_queue *rxq) rxq->rx_desc_area_size <= mp->rx_desc_sram_size) iounmap(rxq->rx_desc_area); else - dma_free_coherent(NULL, rxq->rx_desc_area_size, + dma_free_coherent(mp->dev->dev.parent, rxq->rx_desc_area_size, rxq->rx_desc_area, rxq->rx_desc_dma); kfree(rxq->rx_skb); @@ -2007,9 +2010,9 @@ static int txq_init(struct mv643xx_eth_private *mp, int index) mp->tx_desc_sram_size); txq->tx_desc_dma = mp->tx_desc_sram_addr; } else { - txq->tx_desc_area = dma_alloc_coherent(NULL, size, - &txq->tx_desc_dma, - GFP_KERNEL); + txq->tx_desc_area = dma_alloc_coherent(mp->dev->dev.parent, + size, &txq->tx_desc_dma, + GFP_KERNEL); } if (txq->tx_desc_area == NULL) { @@ -2053,7 +2056,7 @@ static void txq_deinit(struct tx_queue *txq) txq->tx_desc_area_size <= mp->tx_desc_sram_size) iounmap(txq->tx_desc_area); else - dma_free_coherent(NULL, txq->tx_desc_area_size, + dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size, txq->tx_desc_area, txq->tx_desc_dma); } diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index 0b6e8c89683..3b19e0ce290 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c @@ -66,7 +66,6 @@ static const int multicast_filter_limit = 32; #define RX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */ #define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */ #define EarlyTxThld 0x3F /* 0x3F means NO early transmit */ -#define RxPacketMaxSize 0x3FE8 /* 16K - 1 - ETH_HLEN - VLAN - CRC... */ #define SafeMtu 0x1c20 /* ... actually life sucks beyond ~7k */ #define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */ @@ -2357,10 +2356,10 @@ static u16 rtl_rw_cpluscmd(void __iomem *ioaddr) return cmd; } -static void rtl_set_rx_max_size(void __iomem *ioaddr) +static void rtl_set_rx_max_size(void __iomem *ioaddr, unsigned int rx_buf_sz) { /* Low hurts. Let's disable the filtering. 
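The mv643xx_eth hunks replace the NULL device argument in the DMA API calls with mp->dev->dev.parent, so the mappings respect the platform device's DMA mask and cache-coherency attributes. A small sketch of mapping a receive buffer against a real struct device; the names are hypothetical and the dma_mapping_error() check is an extra precaution, not part of the patch:

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>
#include <linux/errno.h>

static int demo_map_rx_buf(struct device *dma_dev, struct sk_buff *skb,
                           unsigned int len, dma_addr_t *handle)
{
    dma_addr_t addr;

    /* Map against the real device so its DMA mask and coherency
     * settings are honoured (a NULL device bypasses both). */
    addr = dma_map_single(dma_dev, skb->data, len, DMA_FROM_DEVICE);
    if (dma_mapping_error(dma_dev, addr))
        return -ENOMEM;

    *handle = addr;
    return 0;
}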
*/ - RTL_W16(RxMaxSize, 16383); + RTL_W16(RxMaxSize, rx_buf_sz); } static void rtl8169_set_magic_reg(void __iomem *ioaddr, unsigned mac_version) @@ -2407,7 +2406,7 @@ static void rtl_hw_start_8169(struct net_device *dev) RTL_W8(EarlyTxThres, EarlyTxThld); - rtl_set_rx_max_size(ioaddr); + rtl_set_rx_max_size(ioaddr, tp->rx_buf_sz); if ((tp->mac_version == RTL_GIGA_MAC_VER_01) || (tp->mac_version == RTL_GIGA_MAC_VER_02) || @@ -2668,7 +2667,7 @@ static void rtl_hw_start_8168(struct net_device *dev) RTL_W8(EarlyTxThres, EarlyTxThld); - rtl_set_rx_max_size(ioaddr); + rtl_set_rx_max_size(ioaddr, tp->rx_buf_sz); tp->cp_cmd |= RTL_R16(CPlusCmd) | PktCntrDisable | INTT_1; @@ -2846,7 +2845,7 @@ static void rtl_hw_start_8101(struct net_device *dev) RTL_W8(EarlyTxThres, EarlyTxThld); - rtl_set_rx_max_size(ioaddr); + rtl_set_rx_max_size(ioaddr, tp->rx_buf_sz); tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW; @@ -3554,54 +3553,64 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance) int handled = 0; int status; + /* loop handling interrupts until we have no new ones or + * we hit a invalid/hotplug case. + */ status = RTL_R16(IntrStatus); + while (status && status != 0xffff) { + handled = 1; - /* hotplug/major error/no more work/shared irq */ - if ((status == 0xffff) || !status) - goto out; - - handled = 1; - - if (unlikely(!netif_running(dev))) { - rtl8169_asic_down(ioaddr); - goto out; - } - - status &= tp->intr_mask; - RTL_W16(IntrStatus, - (status & RxFIFOOver) ? (status | RxOverflow) : status); - - if (!(status & tp->intr_event)) - goto out; - - /* Work around for rx fifo overflow */ - if (unlikely(status & RxFIFOOver) && - (tp->mac_version == RTL_GIGA_MAC_VER_11)) { - netif_stop_queue(dev); - rtl8169_tx_timeout(dev); - goto out; - } - - if (unlikely(status & SYSErr)) { - rtl8169_pcierr_interrupt(dev); - goto out; - } - - if (status & LinkChg) - rtl8169_check_link_status(dev, tp, ioaddr); - - if (status & tp->napi_event) { - RTL_W16(IntrMask, tp->intr_event & ~tp->napi_event); - tp->intr_mask = ~tp->napi_event; - - if (likely(napi_schedule_prep(&tp->napi))) - __napi_schedule(&tp->napi); - else if (netif_msg_intr(tp)) { - printk(KERN_INFO "%s: interrupt %04x in poll\n", - dev->name, status); + /* Handle all of the error cases first. These will reset + * the chip, so just exit the loop. + */ + if (unlikely(!netif_running(dev))) { + rtl8169_asic_down(ioaddr); + break; } + + /* Work around for rx fifo overflow */ + if (unlikely(status & RxFIFOOver) && + (tp->mac_version == RTL_GIGA_MAC_VER_11)) { + netif_stop_queue(dev); + rtl8169_tx_timeout(dev); + break; + } + + if (unlikely(status & SYSErr)) { + rtl8169_pcierr_interrupt(dev); + break; + } + + if (status & LinkChg) + rtl8169_check_link_status(dev, tp, ioaddr); + + /* We need to see the lastest version of tp->intr_mask to + * avoid ignoring an MSI interrupt and having to wait for + * another event which may never come. + */ + smp_rmb(); + if (status & tp->intr_mask & tp->napi_event) { + RTL_W16(IntrMask, tp->intr_event & ~tp->napi_event); + tp->intr_mask = ~tp->napi_event; + + if (likely(napi_schedule_prep(&tp->napi))) + __napi_schedule(&tp->napi); + else if (netif_msg_intr(tp)) { + printk(KERN_INFO "%s: interrupt %04x in poll\n", + dev->name, status); + } + } + + /* We only get a new MSI interrupt when all active irq + * sources on the chip have been acknowledged. So, ack + * everything we've seen and check if new sources have become + * active to avoid blocking all interrupts from the chip. 
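The reworked rtl8169_interrupt() above keeps servicing and re-reading IntrStatus until no sources are left, because with MSI a new interrupt is only generated after every active source has been acknowledged. A compressed sketch of that loop shape; the register offset, helpers and structure below are placeholders, not the r8169 definitions:

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/types.h>

#define DEMO_INTR_STATUS 0x3e   /* hypothetical register offset */

struct demo_nic {
    void __iomem *ioaddr;
};

static int demo_handle_events(struct demo_nic *priv, u16 status)
{
    /* dispatch link, rx and tx work; return <0 on a fatal event */
    return 0;
}

static irqreturn_t demo_interrupt(int irq, void *dev_id)
{
    struct demo_nic *priv = dev_id;
    int handled = 0;
    u16 status = readw(priv->ioaddr + DEMO_INTR_STATUS);

    while (status && status != 0xffff) {    /* 0xffff: device gone */
        handled = 1;

        if (demo_handle_events(priv, status) < 0)
            break;  /* fatal path: let a chip reset recover */

        /* Ack everything seen, then re-read: with MSI a new
         * interrupt only fires once all active sources have
         * been acknowledged. */
        writew(status, priv->ioaddr + DEMO_INTR_STATUS);
        status = readw(priv->ioaddr + DEMO_INTR_STATUS);
    }

    return IRQ_RETVAL(handled);
}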
+ */ + RTL_W16(IntrStatus, + (status & RxFIFOOver) ? (status | RxOverflow) : status); + status = RTL_R16(IntrStatus); } -out: + return IRQ_RETVAL(handled); } @@ -3617,13 +3626,15 @@ static int rtl8169_poll(struct napi_struct *napi, int budget) if (work_done < budget) { napi_complete(napi); - tp->intr_mask = 0xffff; - /* - * 20040426: the barrier is not strictly required but the - * behavior of the irq handler could be less predictable - * without it. Btw, the lack of flush for the posted pci - * write is safe - FR + + /* We need for force the visibility of tp->intr_mask + * for other CPUs, as we can loose an MSI interrupt + * and potentially wait for a retransmit timeout if we don't. + * The posted write to IntrMask is safe, as it will + * eventually make it to the chip and we won't loose anything + * until it does. */ + tp->intr_mask = 0xffff; smp_wmb(); RTL_W16(IntrMask, tp->intr_event); } diff --git a/drivers/net/vxge/vxge-traffic.c b/drivers/net/vxge/vxge-traffic.c index 7be0ae10d69..c2eeac4125f 100644 --- a/drivers/net/vxge/vxge-traffic.c +++ b/drivers/net/vxge/vxge-traffic.c @@ -115,7 +115,7 @@ enum vxge_hw_status vxge_hw_vpath_intr_enable(struct __vxge_hw_vpath_handle *vp) VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_POISON| VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_POISON| VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_DMA_ERR| - VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_DMA_ERR), 0, 32), + VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_DMA_ERR), 0, 32), &vp_reg->kdfcctl_errors_mask); __vxge_hw_pio_mem_write32_upper(0, &vp_reg->vpath_ppif_int_mask); diff --git a/drivers/net/wan/ixp4xx_hss.c b/drivers/net/wan/ixp4xx_hss.c index 3bf7d3f447d..765a7f5d6aa 100644 --- a/drivers/net/wan/ixp4xx_hss.c +++ b/drivers/net/wan/ixp4xx_hss.c @@ -1249,7 +1249,7 @@ static int __devinit hss_init_one(struct platform_device *pdev) return -ENOMEM; if ((port->npe = npe_request(0)) == NULL) { - err = -ENOSYS; + err = -ENODEV; goto err_free; } @@ -1311,7 +1311,7 @@ static int __init hss_init_module(void) if ((ixp4xx_read_feature_bits() & (IXP4XX_FEATURE_HDLC | IXP4XX_FEATURE_HSS)) != (IXP4XX_FEATURE_HDLC | IXP4XX_FEATURE_HSS)) - return -ENOSYS; + return -ENODEV; spin_lock_init(&npe_lock); diff --git a/drivers/net/wimax/i2400m/rx.c b/drivers/net/wimax/i2400m/rx.c index 02419bfd64b..f9fc3890232 100644 --- a/drivers/net/wimax/i2400m/rx.c +++ b/drivers/net/wimax/i2400m/rx.c @@ -819,10 +819,9 @@ void i2400m_roq_queue_update_ws(struct i2400m *i2400m, struct i2400m_roq *roq, roq_data = (struct i2400m_roq_data *) &skb->cb; i2400m_net_erx(i2400m, skb, roq_data->cs); } - else { + else __i2400m_roq_queue(i2400m, roq, skb, sn, nsn); - __i2400m_roq_update_ws(i2400m, roq, sn + 1); - } + __i2400m_roq_update_ws(i2400m, roq, sn + 1); i2400m_roq_log_add(i2400m, roq, I2400M_RO_TYPE_PACKET_WS, old_ws, len, sn, nsn, roq->ws); } diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c index ca4151a9e22..17851321b7f 100644 --- a/drivers/net/wimax/i2400m/usb.c +++ b/drivers/net/wimax/i2400m/usb.c @@ -505,27 +505,52 @@ int i2400mu_suspend(struct usb_interface *iface, pm_message_t pm_msg) #ifdef CONFIG_PM struct usb_device *usb_dev = i2400mu->usb_dev; #endif + unsigned is_autosuspend = 0; struct i2400m *i2400m = &i2400mu->i2400m; +#ifdef CONFIG_PM + if (usb_dev->auto_pm > 0) + is_autosuspend = 1; +#endif + d_fnstart(3, dev, "(iface %p pm_msg %u)\n", iface, pm_msg.event); if (i2400m->updown == 0) goto no_firmware; - d_printf(1, dev, "fw up, requesting standby\n"); + if (i2400m->state == I2400M_SS_DATA_PATH_CONNECTED && 
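The two r8169 hunks pair barriers around tp->intr_mask: the poll routine publishes the restored mask with smp_wmb() before re-enabling interrupts, and the handler issues smp_rmb() before testing it, so another CPU taking an MSI cannot act on a stale mask and drop the event. A minimal sketch of that publish/observe pairing with a hypothetical shared mask (barrier header as found in current trees):

#include <asm/barrier.h>
#include <linux/types.h>

static u16 demo_intr_mask;  /* hypothetical state shared between the
                             * poll routine and the interrupt handler */

static void demo_poll_done(void)
{
    demo_intr_mask = 0xffff;    /* 1. restore the software mask ...     */
    smp_wmb();                  /* 2. publish it to other CPUs ...      */
    /* 3. ... and only then re-enable the hardware interrupt mask. */
}

static bool demo_irq_source_enabled(u16 status)
{
    smp_rmb();                  /* pairs with the smp_wmb() above so a
                                 * stale mask is never used here */
    return (status & demo_intr_mask) != 0;
}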
is_autosuspend) { + /* ugh -- the device is connected and this suspend + * request is an autosuspend one (not a system standby + * / hibernate). + * + * The only way the device can go to standby is if the + * link with the base station is in IDLE mode; that + * were the case, we'd be in status + * I2400M_SS_CONNECTED_IDLE. But we are not. + * + * If we *tell* him to go power save now, it'll reset + * as a precautionary measure, so if this is an + * autosuspend thing, say no and it'll come back + * later, when the link is IDLE + */ + result = -EBADF; + d_printf(1, dev, "fw up, link up, not-idle, autosuspend: " + "not entering powersave\n"); + goto error_not_now; + } + d_printf(1, dev, "fw up: entering powersave\n"); atomic_dec(&i2400mu->do_autopm); result = i2400m_cmd_enter_powersave(i2400m); atomic_inc(&i2400mu->do_autopm); -#ifdef CONFIG_PM - if (result < 0 && usb_dev->auto_pm == 0) { + if (result < 0 && !is_autosuspend) { /* System suspend, can't fail */ dev_err(dev, "failed to suspend, will reset on resume\n"); result = 0; } -#endif if (result < 0) goto error_enter_powersave; i2400mu_notification_release(i2400mu); - d_printf(1, dev, "fw up, got standby\n"); + d_printf(1, dev, "powersave requested\n"); error_enter_powersave: +error_not_now: no_firmware: d_fnend(3, dev, "(iface %p pm_msg %u) = %d\n", iface, pm_msg.event, result); diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig index 8a0823588c5..3d94e7dfea6 100644 --- a/drivers/net/wireless/Kconfig +++ b/drivers/net/wireless/Kconfig @@ -430,6 +430,7 @@ config RTL8187 ASUS P5B Deluxe Toshiba Satellite Pro series of laptops Asus Wireless Link + Linksys WUSB54GC-EU Thanks to Realtek for their support! diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c index c36d3a3d655..9eabf4d1f2e 100644 --- a/drivers/net/wireless/airo.c +++ b/drivers/net/wireless/airo.c @@ -6467,6 +6467,7 @@ static int airo_get_encode(struct net_device *dev, { struct airo_info *local = dev->ml_priv; int index = (dwrq->flags & IW_ENCODE_INDEX) - 1; + int wep_key_len; u8 buf[16]; if (!local->wep_capable) @@ -6500,8 +6501,13 @@ static int airo_get_encode(struct net_device *dev, dwrq->flags |= index + 1; /* Copy the key to the user buffer */ - dwrq->length = get_wep_key(local, index, &buf[0], sizeof(buf)); - memcpy(extra, buf, dwrq->length); + wep_key_len = get_wep_key(local, index, &buf[0], sizeof(buf)); + if (wep_key_len < 0) { + dwrq->length = 0; + } else { + dwrq->length = wep_key_len; + memcpy(extra, buf, dwrq->length); + } return 0; } @@ -6614,7 +6620,7 @@ static int airo_get_encodeext(struct net_device *dev, struct airo_info *local = dev->ml_priv; struct iw_point *encoding = &wrqu->encoding; struct iw_encode_ext *ext = (struct iw_encode_ext *)extra; - int idx, max_key_len; + int idx, max_key_len, wep_key_len; u8 buf[16]; if (!local->wep_capable) @@ -6658,8 +6664,13 @@ static int airo_get_encodeext(struct net_device *dev, memset(extra, 0, 16); /* Copy the key to the user buffer */ - ext->key_len = get_wep_key(local, idx, &buf[0], sizeof(buf)); - memcpy(extra, buf, ext->key_len); + wep_key_len = get_wep_key(local, idx, &buf[0], sizeof(buf)); + if (wep_key_len < 0) { + ext->key_len = 0; + } else { + ext->key_len = wep_key_len; + memcpy(extra, buf, ext->key_len); + } return 0; } diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c index 744f4f4dd3d..8d93ca4651b 100644 --- a/drivers/net/wireless/at76c50x-usb.c +++ b/drivers/net/wireless/at76c50x-usb.c @@ -1873,18 +1873,18 @@ static void 
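The airo.c hunks check the return of get_wep_key() before using it: a negative error previously landed in the unsigned length field and was then passed to memcpy(), reading far past the 16-byte stack buffer. A small sketch of the guarded pattern with a hypothetical key-lookup helper:

#include <linux/string.h>
#include <linux/types.h>
#include <linux/errno.h>

/* Hypothetical lookup helper: returns the key length, or -errno. */
static int demo_get_key(int index, u8 *buf, size_t buflen)
{
    if (index != 0 || buflen < 5)
        return -EINVAL;
    memset(buf, 0xab, 5);
    return 5;
}

static int demo_copy_key(int index, u8 *out, u16 *out_len)
{
    u8 buf[16];
    int len = demo_get_key(index, buf, sizeof(buf));

    if (len < 0) {
        *out_len = 0;   /* report "no key" rather than letting a */
        return len;     /* negative length reach memcpy()        */
    }

    *out_len = len;
    memcpy(out, buf, len);
    return 0;
}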
at76_dwork_hw_scan(struct work_struct *work) if (ret != CMD_STATUS_COMPLETE) { queue_delayed_work(priv->hw->workqueue, &priv->dwork_hw_scan, SCAN_POLL_INTERVAL); - goto exit; + mutex_unlock(&priv->mtx); + return; } - ieee80211_scan_completed(priv->hw, false); - if (is_valid_ether_addr(priv->bssid)) at76_join(priv); - ieee80211_wake_queues(priv->hw); - -exit: mutex_unlock(&priv->mtx); + + ieee80211_scan_completed(priv->hw, false); + + ieee80211_wake_queues(priv->hw); } static int at76_hw_scan(struct ieee80211_hw *hw, diff --git a/drivers/net/wireless/ath5k/base.c b/drivers/net/wireless/ath5k/base.c index a08bc8a4fb6..32df27a9c7a 100644 --- a/drivers/net/wireless/ath5k/base.c +++ b/drivers/net/wireless/ath5k/base.c @@ -214,7 +214,7 @@ static struct pci_driver ath5k_pci_driver = { * Prototypes - MAC 802.11 stack related functions */ static int ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb); -static int ath5k_reset(struct ath5k_softc *sc, bool stop, bool change_channel); +static int ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan); static int ath5k_reset_wake(struct ath5k_softc *sc); static int ath5k_start(struct ieee80211_hw *hw); static void ath5k_stop(struct ieee80211_hw *hw); @@ -1038,16 +1038,13 @@ ath5k_chan_set(struct ath5k_softc *sc, struct ieee80211_channel *chan) if (chan->center_freq != sc->curchan->center_freq || chan->hw_value != sc->curchan->hw_value) { - sc->curchan = chan; - sc->curband = &sc->sbands[chan->band]; - /* * To switch channels clear any pending DMA operations; * wait long enough for the RX fifo to drain, reset the * hardware at the new frequency, and then re-enable * the relevant bits of the h/w. */ - return ath5k_reset(sc, true, true); + return ath5k_reset(sc, chan); } return 0; @@ -2314,7 +2311,7 @@ ath5k_init(struct ath5k_softc *sc) sc->imask = AR5K_INT_RXOK | AR5K_INT_RXERR | AR5K_INT_RXEOL | AR5K_INT_RXORN | AR5K_INT_TXDESC | AR5K_INT_TXEOL | AR5K_INT_FATAL | AR5K_INT_GLOBAL; - ret = ath5k_reset(sc, false, false); + ret = ath5k_reset(sc, NULL); if (ret) goto done; @@ -2599,18 +2596,25 @@ drop_packet: return NETDEV_TX_OK; } +/* + * Reset the hardware. If chan is not NULL, then also pause rx/tx + * and change to the given channel. 
+ */ static int -ath5k_reset(struct ath5k_softc *sc, bool stop, bool change_channel) +ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan) { struct ath5k_hw *ah = sc->ah; int ret; ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "resetting\n"); - if (stop) { + if (chan) { ath5k_hw_set_imr(ah, 0); ath5k_txq_cleanup(sc); ath5k_rx_stop(sc); + + sc->curchan = chan; + sc->curband = &sc->sbands[chan->band]; } ret = ath5k_hw_reset(ah, sc->opmode, sc->curchan, true); if (ret) { @@ -2648,7 +2652,7 @@ ath5k_reset_wake(struct ath5k_softc *sc) { int ret; - ret = ath5k_reset(sc, true, true); + ret = ath5k_reset(sc, sc->curchan); if (!ret) ieee80211_wake_queues(sc->hw); diff --git a/drivers/net/wireless/ath5k/phy.c b/drivers/net/wireless/ath5k/phy.c index 9e2faae5ae9..b48b29dca3d 100644 --- a/drivers/net/wireless/ath5k/phy.c +++ b/drivers/net/wireless/ath5k/phy.c @@ -1487,28 +1487,35 @@ ath5k_get_linear_pcdac_min(const u8 *stepL, const u8 *stepR, { s8 tmp; s16 min_pwrL, min_pwrR; - s16 pwr_i = pwrL[0]; + s16 pwr_i; - do { - pwr_i--; - tmp = (s8) ath5k_get_interpolated_value(pwr_i, - pwrL[0], pwrL[1], - stepL[0], stepL[1]); + if (pwrL[0] == pwrL[1]) + min_pwrL = pwrL[0]; + else { + pwr_i = pwrL[0]; + do { + pwr_i--; + tmp = (s8) ath5k_get_interpolated_value(pwr_i, + pwrL[0], pwrL[1], + stepL[0], stepL[1]); + } while (tmp > 1); - } while (tmp > 1); + min_pwrL = pwr_i; + } - min_pwrL = pwr_i; + if (pwrR[0] == pwrR[1]) + min_pwrR = pwrR[0]; + else { + pwr_i = pwrR[0]; + do { + pwr_i--; + tmp = (s8) ath5k_get_interpolated_value(pwr_i, + pwrR[0], pwrR[1], + stepR[0], stepR[1]); + } while (tmp > 1); - pwr_i = pwrR[0]; - do { - pwr_i--; - tmp = (s8) ath5k_get_interpolated_value(pwr_i, - pwrR[0], pwrR[1], - stepR[0], stepR[1]); - - } while (tmp > 1); - - min_pwrR = pwr_i; + min_pwrR = pwr_i; + } /* Keep the right boundary so that it works for both curves */ return max(min_pwrL, min_pwrR); diff --git a/drivers/net/wireless/ath5k/reset.c b/drivers/net/wireless/ath5k/reset.c index 7a17d31b2fd..5f72c111c2e 100644 --- a/drivers/net/wireless/ath5k/reset.c +++ b/drivers/net/wireless/ath5k/reset.c @@ -26,7 +26,7 @@ \*****************************/ #include /* To determine if a card is pci-e */ -#include /* For get_bitmask_order */ +#include #include "ath5k.h" #include "reg.h" #include "base.h" @@ -69,10 +69,10 @@ static inline int ath5k_hw_write_ofdm_timings(struct ath5k_hw *ah, /* Get exponent * ALGO: coef_exp = 14 - highest set bit position */ - coef_exp = get_bitmask_order(coef_scaled); + coef_exp = ilog2(coef_scaled); /* Doesn't make sense if it's zero*/ - if (!coef_exp) + if (!coef_scaled || !coef_exp) return -EINVAL; /* Note: we've shifted coef_scaled by 24 */ @@ -359,7 +359,7 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, int flags, bool initial) mode |= AR5K_PHY_MODE_FREQ_5GHZ; if (ah->ah_radio == AR5K_RF5413) - clock |= AR5K_PHY_PLL_40MHZ_5413; + clock = AR5K_PHY_PLL_40MHZ_5413; else clock |= AR5K_PHY_PLL_40MHZ; diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c index e5ca2511a81..9452461ce86 100644 --- a/drivers/net/wireless/iwlwifi/iwl-5000.c +++ b/drivers/net/wireless/iwlwifi/iwl-5000.c @@ -46,7 +46,7 @@ #include "iwl-6000-hw.h" /* Highest firmware API version supported */ -#define IWL5000_UCODE_API_MAX 1 +#define IWL5000_UCODE_API_MAX 2 #define IWL5150_UCODE_API_MAX 2 /* Lowest firmware API version supported */ diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c index edfa5e149f7..bd438d8acf5 100644 --- 
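The ath5k_hw_write_ofdm_timings() hunk switches to ilog2() and adds a zero check, since the value wanted is the highest set bit position (floor of log2) and ilog2(0) is undefined. A small sketch of the guarded computation; the "14 minus highest set bit" form mirrors the driver's comment and is illustrative only:

#include <linux/log2.h>
#include <linux/types.h>
#include <linux/errno.h>

static int demo_coef_exp(u32 coef_scaled)
{
    int highest;

    if (!coef_scaled)
        return -EINVAL;         /* ilog2(0) is undefined */

    highest = ilog2(coef_scaled);   /* highest set bit position */
    if (!highest)
        return -EINVAL;         /* mirrors the driver's sanity check */

    return 14 - highest;
}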
a/drivers/net/wireless/iwlwifi/iwl-6000.c +++ b/drivers/net/wireless/iwlwifi/iwl-6000.c @@ -101,8 +101,8 @@ struct iwl_cfg iwl6000_2agn_cfg = { .eeprom_ver = EEPROM_5000_EEPROM_VERSION, .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, .mod_params = &iwl50_mod_params, - .valid_tx_ant = ANT_BC, - .valid_rx_ant = ANT_BC, + .valid_tx_ant = ANT_AB, + .valid_rx_ant = ANT_AB, .need_pll_cfg = false, }; @@ -117,8 +117,8 @@ struct iwl_cfg iwl6050_2agn_cfg = { .eeprom_ver = EEPROM_5000_EEPROM_VERSION, .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, .mod_params = &iwl50_mod_params, - .valid_tx_ant = ANT_BC, - .valid_rx_ant = ANT_BC, + .valid_tx_ant = ANT_AB, + .valid_rx_ant = ANT_AB, .need_pll_cfg = false, }; diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c index 1ef4192207a..f46ba247577 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn.c @@ -669,13 +669,6 @@ static int iwl_set_mode(struct iwl_priv *priv, int mode) if (!iwl_is_ready_rf(priv)) return -EAGAIN; - cancel_delayed_work(&priv->scan_check); - if (iwl_scan_cancel_timeout(priv, 100)) { - IWL_WARN(priv, "Aborted scan still in progress after 100ms\n"); - IWL_DEBUG_MAC80211(priv, "leaving - scan abort failed.\n"); - return -EAGAIN; - } - iwl_commit_rxon(priv); return 0; @@ -3636,7 +3629,9 @@ static struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x0085, 0x1112, iwl6000_2ag_cfg)}, {IWL_PCI_DEVICE(0x0082, 0x1122, iwl6000_2ag_cfg)}, {IWL_PCI_DEVICE(0x422B, PCI_ANY_ID, iwl6000_3agn_cfg)}, + {IWL_PCI_DEVICE(0x422C, PCI_ANY_ID, iwl6000_2agn_cfg)}, {IWL_PCI_DEVICE(0x4238, PCI_ANY_ID, iwl6000_3agn_cfg)}, + {IWL_PCI_DEVICE(0x4239, PCI_ANY_ID, iwl6000_2agn_cfg)}, {IWL_PCI_DEVICE(0x0082, PCI_ANY_ID, iwl6000_2agn_cfg)}, {IWL_PCI_DEVICE(0x0085, PCI_ANY_ID, iwl6000_3agn_cfg)}, {IWL_PCI_DEVICE(0x0086, PCI_ANY_ID, iwl6050_3agn_cfg)}, diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c index e7c65c4f741..6330b91e37c 100644 --- a/drivers/net/wireless/iwlwifi/iwl-scan.c +++ b/drivers/net/wireless/iwlwifi/iwl-scan.c @@ -227,9 +227,6 @@ static void iwl_rx_scan_complete_notif(struct iwl_priv *priv, /* The HW is no longer scanning */ clear_bit(STATUS_SCAN_HW, &priv->status); - /* The scan completion notification came in, so kill that timer... */ - cancel_delayed_work(&priv->scan_check); - IWL_DEBUG_INFO(priv, "Scan pass on %sGHz took %dms\n", (priv->scan_bands & BIT(IEEE80211_BAND_2GHZ)) ? 
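The iwl-scan.c hunk above stops cancelling the scan_check watchdog from the completion notification; the hunks that follow cancel it instead where a new scan is requested and where completion is actually processed. A minimal sketch of that delayed-work watchdog shape, with hypothetical names and none of the driver's locking or state bits:

#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct demo_scan {
    struct delayed_work check;  /* fires if completion never arrives */
};

static void demo_scan_check(struct work_struct *work)
{
    /* ... abort the stuck scan and reset driver state here ... */
}

static void demo_scan_init(struct demo_scan *s)
{
    INIT_DELAYED_WORK(&s->check, demo_scan_check);
}

static void demo_scan_request(struct demo_scan *s)
{
    cancel_delayed_work(&s->check);     /* re-arm the watchdog */
    /* ... send the scan command to the firmware ... */
    schedule_delayed_work(&s->check, 7 * HZ);
}

static void demo_scan_completed(struct demo_scan *s)
{
    cancel_delayed_work(&s->check);     /* completion arrived in time */
    /* ... notify mac80211 that the scan finished ... */
}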
"2.4" : "5.2", @@ -712,6 +709,8 @@ static void iwl_bg_request_scan(struct work_struct *data) mutex_lock(&priv->mutex); + cancel_delayed_work(&priv->scan_check); + if (!iwl_is_ready(priv)) { IWL_WARN(priv, "request scan called when driver not ready.\n"); goto done; @@ -925,6 +924,8 @@ void iwl_bg_scan_completed(struct work_struct *work) IWL_DEBUG_SCAN(priv, "SCAN complete scan\n"); + cancel_delayed_work(&priv->scan_check); + ieee80211_scan_completed(priv->hw, false); if (test_bit(STATUS_EXIT_PENDING, &priv->status)) diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c index 5798fe49c77..44ab03a12e4 100644 --- a/drivers/net/wireless/iwlwifi/iwl-sta.c +++ b/drivers/net/wireless/iwlwifi/iwl-sta.c @@ -719,6 +719,14 @@ static int iwl_set_tkip_dynamic_key_info(struct iwl_priv *priv, { unsigned long flags; int ret = 0; + __le16 key_flags = 0; + + key_flags |= (STA_KEY_FLG_TKIP | STA_KEY_FLG_MAP_KEY_MSK); + key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS); + key_flags &= ~STA_KEY_FLG_INVALID; + + if (sta_id == priv->hw_params.bcast_sta_id) + key_flags |= STA_KEY_MULTICAST_MSK; keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; @@ -738,6 +746,9 @@ static int iwl_set_tkip_dynamic_key_info(struct iwl_priv *priv, WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET, "no space for a new key"); + priv->stations[sta_id].sta.key.key_flags = key_flags; + + /* This copy is acutally not needed: we get the key with each TX */ memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key, 16); @@ -754,9 +765,7 @@ void iwl_update_tkip_key(struct iwl_priv *priv, { u8 sta_id = IWL_INVALID_STATION; unsigned long flags; - __le16 key_flags = 0; int i; - DECLARE_MAC_BUF(mac); sta_id = iwl_find_station(priv, addr); if (sta_id == IWL_INVALID_STATION) { @@ -771,16 +780,8 @@ void iwl_update_tkip_key(struct iwl_priv *priv, return; } - key_flags |= (STA_KEY_FLG_TKIP | STA_KEY_FLG_MAP_KEY_MSK); - key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS); - key_flags &= ~STA_KEY_FLG_INVALID; - - if (sta_id == priv->hw_params.bcast_sta_id) - key_flags |= STA_KEY_MULTICAST_MSK; - spin_lock_irqsave(&priv->sta_lock, flags); - priv->stations[sta_id].sta.key.key_flags = key_flags; priv->stations[sta_id].sta.key.tkip_rx_tsc_byte2 = (u8) iv32; for (i = 0; i < 5; i++) diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c index 70a00c8ee42..ff4d0e41d7c 100644 --- a/drivers/net/wireless/iwlwifi/iwl3945-base.c +++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c @@ -782,13 +782,6 @@ static int iwl3945_set_mode(struct iwl_priv *priv, int mode) if (!iwl_is_ready_rf(priv)) return -EAGAIN; - cancel_delayed_work(&priv->scan_check); - if (iwl_scan_cancel_timeout(priv, 100)) { - IWL_WARN(priv, "Aborted scan still in progress after 100ms\n"); - IWL_DEBUG_MAC80211(priv, "leaving - scan abort failed.\n"); - return -EAGAIN; - } - iwl3945_commit_rxon(priv); return 0; @@ -1744,7 +1737,6 @@ static void iwl3945_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rx rxq->bd = NULL; rxq->rb_stts = NULL; } -EXPORT_SYMBOL(iwl3945_rx_queue_free); /* Convert linear signal-to-noise ratio into dB */ @@ -3299,6 +3291,8 @@ static void iwl3945_bg_request_scan(struct work_struct *data) mutex_lock(&priv->mutex); + cancel_delayed_work(&priv->scan_check); + if (!iwl_is_ready(priv)) { IWL_WARN(priv, "request scan called when driver not ready.\n"); goto done; diff --git 
a/drivers/net/wireless/rt2x00/rt2x00debug.c b/drivers/net/wireless/rt2x00/rt2x00debug.c index 07d378ef0b4..7b3ee8c2eae 100644 --- a/drivers/net/wireless/rt2x00/rt2x00debug.c +++ b/drivers/net/wireless/rt2x00/rt2x00debug.c @@ -138,7 +138,7 @@ void rt2x00debug_update_crypto(struct rt2x00_dev *rt2x00dev, if (cipher == CIPHER_TKIP_NO_MIC) cipher = CIPHER_TKIP; - if (cipher == CIPHER_NONE || cipher > CIPHER_MAX) + if (cipher == CIPHER_NONE || cipher >= CIPHER_MAX) return; /* Remove CIPHER_NONE index */ diff --git a/drivers/net/wireless/rtl818x/rtl8187.h b/drivers/net/wireless/rtl818x/rtl8187.h index 9718f61809c..edeff82a4d0 100644 --- a/drivers/net/wireless/rtl818x/rtl8187.h +++ b/drivers/net/wireless/rtl818x/rtl8187.h @@ -120,6 +120,12 @@ struct rtl8187_priv { __le64 buf; struct sk_buff_head queue; } b_tx_status; /* This queue is used by both -b and non-b devices */ + struct mutex io_mutex; + union { + u8 bits8; + __le16 bits16; + __le32 bits32; + } *io_dmabuf; }; void rtl8187_write_phy(struct ieee80211_hw *dev, u8 addr, u32 data); @@ -129,10 +135,14 @@ static inline u8 rtl818x_ioread8_idx(struct rtl8187_priv *priv, { u8 val; + mutex_lock(&priv->io_mutex); usb_control_msg(priv->udev, usb_rcvctrlpipe(priv->udev, 0), RTL8187_REQ_GET_REG, RTL8187_REQT_READ, - (unsigned long)addr, idx & 0x03, &val, - sizeof(val), HZ / 2); + (unsigned long)addr, idx & 0x03, + &priv->io_dmabuf->bits8, sizeof(val), HZ / 2); + + val = priv->io_dmabuf->bits8; + mutex_unlock(&priv->io_mutex); return val; } @@ -147,10 +157,14 @@ static inline u16 rtl818x_ioread16_idx(struct rtl8187_priv *priv, { __le16 val; + mutex_lock(&priv->io_mutex); usb_control_msg(priv->udev, usb_rcvctrlpipe(priv->udev, 0), RTL8187_REQ_GET_REG, RTL8187_REQT_READ, - (unsigned long)addr, idx & 0x03, &val, - sizeof(val), HZ / 2); + (unsigned long)addr, idx & 0x03, + &priv->io_dmabuf->bits16, sizeof(val), HZ / 2); + + val = priv->io_dmabuf->bits16; + mutex_unlock(&priv->io_mutex); return le16_to_cpu(val); } @@ -165,10 +179,14 @@ static inline u32 rtl818x_ioread32_idx(struct rtl8187_priv *priv, { __le32 val; + mutex_lock(&priv->io_mutex); usb_control_msg(priv->udev, usb_rcvctrlpipe(priv->udev, 0), RTL8187_REQ_GET_REG, RTL8187_REQT_READ, - (unsigned long)addr, idx & 0x03, &val, - sizeof(val), HZ / 2); + (unsigned long)addr, idx & 0x03, + &priv->io_dmabuf->bits32, sizeof(val), HZ / 2); + + val = priv->io_dmabuf->bits32; + mutex_unlock(&priv->io_mutex); return le32_to_cpu(val); } @@ -181,10 +199,15 @@ static inline u32 rtl818x_ioread32(struct rtl8187_priv *priv, __le32 *addr) static inline void rtl818x_iowrite8_idx(struct rtl8187_priv *priv, u8 *addr, u8 val, u8 idx) { + mutex_lock(&priv->io_mutex); + + priv->io_dmabuf->bits8 = val; usb_control_msg(priv->udev, usb_sndctrlpipe(priv->udev, 0), RTL8187_REQ_SET_REG, RTL8187_REQT_WRITE, - (unsigned long)addr, idx & 0x03, &val, - sizeof(val), HZ / 2); + (unsigned long)addr, idx & 0x03, + &priv->io_dmabuf->bits8, sizeof(val), HZ / 2); + + mutex_unlock(&priv->io_mutex); } static inline void rtl818x_iowrite8(struct rtl8187_priv *priv, u8 *addr, u8 val) @@ -195,12 +218,15 @@ static inline void rtl818x_iowrite8(struct rtl8187_priv *priv, u8 *addr, u8 val) static inline void rtl818x_iowrite16_idx(struct rtl8187_priv *priv, __le16 *addr, u16 val, u8 idx) { - __le16 buf = cpu_to_le16(val); + mutex_lock(&priv->io_mutex); + priv->io_dmabuf->bits16 = cpu_to_le16(val); usb_control_msg(priv->udev, usb_sndctrlpipe(priv->udev, 0), RTL8187_REQ_SET_REG, RTL8187_REQT_WRITE, - (unsigned long)addr, idx & 0x03, &buf, sizeof(buf), - 
HZ / 2); + (unsigned long)addr, idx & 0x03, + &priv->io_dmabuf->bits16, sizeof(val), HZ / 2); + + mutex_unlock(&priv->io_mutex); } static inline void rtl818x_iowrite16(struct rtl8187_priv *priv, __le16 *addr, @@ -212,12 +238,15 @@ static inline void rtl818x_iowrite16(struct rtl8187_priv *priv, __le16 *addr, static inline void rtl818x_iowrite32_idx(struct rtl8187_priv *priv, __le32 *addr, u32 val, u8 idx) { - __le32 buf = cpu_to_le32(val); + mutex_lock(&priv->io_mutex); + priv->io_dmabuf->bits32 = cpu_to_le32(val); usb_control_msg(priv->udev, usb_sndctrlpipe(priv->udev, 0), RTL8187_REQ_SET_REG, RTL8187_REQT_WRITE, - (unsigned long)addr, idx & 0x03, &buf, sizeof(buf), - HZ / 2); + (unsigned long)addr, idx & 0x03, + &priv->io_dmabuf->bits32, sizeof(val), HZ / 2); + + mutex_unlock(&priv->io_mutex); } static inline void rtl818x_iowrite32(struct rtl8187_priv *priv, __le32 *addr, diff --git a/drivers/net/wireless/rtl818x/rtl8187_dev.c b/drivers/net/wireless/rtl818x/rtl8187_dev.c index fd81884b9c7..d51ba0a88c2 100644 --- a/drivers/net/wireless/rtl818x/rtl8187_dev.c +++ b/drivers/net/wireless/rtl818x/rtl8187_dev.c @@ -71,6 +71,8 @@ static struct usb_device_id rtl8187_table[] __devinitdata = { {USB_DEVICE(0x18E8, 0x6232), .driver_info = DEVICE_RTL8187}, /* AirLive */ {USB_DEVICE(0x1b75, 0x8187), .driver_info = DEVICE_RTL8187}, + /* Linksys */ + {USB_DEVICE(0x1737, 0x0073), .driver_info = DEVICE_RTL8187B}, {} }; @@ -1329,6 +1331,14 @@ static int __devinit rtl8187_probe(struct usb_interface *intf, priv = dev->priv; priv->is_rtl8187b = (id->driver_info == DEVICE_RTL8187B); + /* allocate "DMA aware" buffer for register accesses */ + priv->io_dmabuf = kmalloc(sizeof(*priv->io_dmabuf), GFP_KERNEL); + if (!priv->io_dmabuf) { + err = -ENOMEM; + goto err_free_dev; + } + mutex_init(&priv->io_mutex); + SET_IEEE80211_DEV(dev, &intf->dev); usb_set_intfdata(intf, dev); priv->udev = udev; @@ -1495,7 +1505,7 @@ static int __devinit rtl8187_probe(struct usb_interface *intf, err = ieee80211_register_hw(dev); if (err) { printk(KERN_ERR "rtl8187: Cannot register device\n"); - goto err_free_dev; + goto err_free_dmabuf; } mutex_init(&priv->conf_mutex); skb_queue_head_init(&priv->b_tx_status.queue); @@ -1506,6 +1516,8 @@ static int __devinit rtl8187_probe(struct usb_interface *intf, return 0; + err_free_dmabuf: + kfree(priv->io_dmabuf); err_free_dev: ieee80211_free_hw(dev); usb_set_intfdata(intf, NULL); @@ -1526,6 +1538,7 @@ static void __devexit rtl8187_disconnect(struct usb_interface *intf) priv = dev->priv; usb_reset_device(priv->udev); usb_put_dev(interface_to_usbdev(intf)); + kfree(priv->io_dmabuf); ieee80211_free_hw(dev); } diff --git a/drivers/net/wireless/rtl818x/rtl8187_rtl8225.c b/drivers/net/wireless/rtl818x/rtl8187_rtl8225.c index 78df281b297..a09819386a1 100644 --- a/drivers/net/wireless/rtl818x/rtl8187_rtl8225.c +++ b/drivers/net/wireless/rtl818x/rtl8187_rtl8225.c @@ -88,9 +88,15 @@ static void rtl8225_write_8051(struct ieee80211_hw *dev, u8 addr, __le16 data) rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg80); udelay(10); + mutex_lock(&priv->io_mutex); + + priv->io_dmabuf->bits16 = data; usb_control_msg(priv->udev, usb_sndctrlpipe(priv->udev, 0), RTL8187_REQ_SET_REG, RTL8187_REQT_WRITE, - addr, 0x8225, &data, sizeof(data), HZ / 2); + addr, 0x8225, &priv->io_dmabuf->bits16, sizeof(data), + HZ / 2); + + mutex_unlock(&priv->io_mutex); rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg80 | (1 << 2)); udelay(10); diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c index 
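The rtl8187 changes route every register access through a kmalloc()ed io_dmabuf serialized by io_mutex, because buffers handed to usb_control_msg() must be DMA-able; the old code passed addresses of on-stack variables, which is not safe on all architectures. A minimal sketch of the pattern with hypothetical request constants; allocating io_buf with kmalloc() at probe time is assumed:

#include <linux/usb.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>
#include <linux/types.h>

#define DEMO_REQ_GET_REG 0x05   /* hypothetical vendor request */
#define DEMO_REQT_READ   0xc0   /* device-to-host, vendor, device */

struct demo_usb {
    struct usb_device *udev;
    struct mutex io_mutex;  /* serializes use of io_buf */
    u8 *io_buf;             /* kmalloc()ed at probe time, hence DMA-able */
};

static int demo_read_reg8(struct demo_usb *d, u16 addr, u8 *val)
{
    int ret;

    mutex_lock(&d->io_mutex);
    ret = usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev, 0),
                          DEMO_REQ_GET_REG, DEMO_REQT_READ,
                          addr, 0, d->io_buf, 1, HZ / 2);
    if (ret >= 0)
        *val = d->io_buf[0];
    mutex_unlock(&d->io_mutex);

    return ret < 0 ? ret : 0;
}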
f0e99d4c066..242257b1944 100644 --- a/drivers/oprofile/cpu_buffer.c +++ b/drivers/oprofile/cpu_buffer.c @@ -78,16 +78,20 @@ void free_cpu_buffers(void) op_ring_buffer_write = NULL; } +#define RB_EVENT_HDR_SIZE 4 + int alloc_cpu_buffers(void) { int i; unsigned long buffer_size = oprofile_cpu_buffer_size; + unsigned long byte_size = buffer_size * (sizeof(struct op_sample) + + RB_EVENT_HDR_SIZE); - op_ring_buffer_read = ring_buffer_alloc(buffer_size, OP_BUFFER_FLAGS); + op_ring_buffer_read = ring_buffer_alloc(byte_size, OP_BUFFER_FLAGS); if (!op_ring_buffer_read) goto fail; - op_ring_buffer_write = ring_buffer_alloc(buffer_size, OP_BUFFER_FLAGS); + op_ring_buffer_write = ring_buffer_alloc(byte_size, OP_BUFFER_FLAGS); if (!op_ring_buffer_write) goto fail; diff --git a/drivers/parport/parport_gsc.c b/drivers/parport/parport_gsc.c index e6a7e847ee8..ea31a452b15 100644 --- a/drivers/parport/parport_gsc.c +++ b/drivers/parport/parport_gsc.c @@ -352,8 +352,8 @@ static int __devinit parport_init_chip(struct parisc_device *dev) unsigned long port; if (!dev->irq) { - printk(KERN_WARNING "IRQ not found for parallel device at 0x%lx\n", - dev->hpa.start); + printk(KERN_WARNING "IRQ not found for parallel device at 0x%llx\n", + (unsigned long long)dev->hpa.start); return -ENODEV; } diff --git a/drivers/parport/share.c b/drivers/parport/share.c index 0ebca450ed2..dffa5d4fb29 100644 --- a/drivers/parport/share.c +++ b/drivers/parport/share.c @@ -614,7 +614,10 @@ parport_register_device(struct parport *port, const char *name, * pardevice fields. -arca */ port->ops->init_state(tmp, tmp->state); - parport_device_proc_register(tmp); + if (!test_and_set_bit(PARPORT_DEVPROC_REGISTERED, &port->devflags)) { + port->proc_device = tmp; + parport_device_proc_register(tmp); + } return tmp; out_free_all: @@ -646,10 +649,14 @@ void parport_unregister_device(struct pardevice *dev) } #endif - parport_device_proc_unregister(dev); - port = dev->port->physport; + if (port->proc_device == dev) { + port->proc_device = NULL; + clear_bit(PARPORT_DEVPROC_REGISTERED, &port->devflags); + parport_device_proc_unregister(dev); + } + if (port->cad == dev) { printk(KERN_DEBUG "%s: %s forgot to release port\n", port->name, dev->name); diff --git a/drivers/pci/hotplug/acpiphp.h b/drivers/pci/hotplug/acpiphp.h index 4fc168b7009..e68d5f20ffb 100644 --- a/drivers/pci/hotplug/acpiphp.h +++ b/drivers/pci/hotplug/acpiphp.h @@ -129,7 +129,6 @@ struct acpiphp_func { struct acpiphp_bridge *bridge; /* Ejectable PCI-to-PCI bridge */ struct list_head sibling; - struct pci_dev *pci_dev; struct notifier_block nb; acpi_handle handle; diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c index a33794d9e0d..3a6064bce56 100644 --- a/drivers/pci/hotplug/acpiphp_glue.c +++ b/drivers/pci/hotplug/acpiphp_glue.c @@ -32,9 +32,6 @@ /* * Lifetime rules for pci_dev: - * - The one in acpiphp_func has its refcount elevated by pci_get_slot() - * when the driver is loaded or when an insertion event occurs. It loses - * a refcount when its ejected or the driver unloads. * - The one in acpiphp_bridge has its refcount elevated by pci_get_slot() * when the bridge is scanned and it loses a refcount when the bridge * is removed. 
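The parport share.c hunks use test_and_set_bit() on port->devflags so that only the first device registered on a port creates the per-port proc entry, and only that same device removes it. A small sketch of the claim/release pattern with a hypothetical flag bit and structures:

#include <linux/bitops.h>

#define DEMO_PROC_REGISTERED 0  /* hypothetical bit in port->flags */

struct demo_port {
    unsigned long flags;
    void *proc_owner;
};

static void demo_device_register(struct demo_port *port, void *dev)
{
    /* Atomically claim the proc entry: only the first caller sees
     * the bit clear and performs the registration. */
    if (!test_and_set_bit(DEMO_PROC_REGISTERED, &port->flags)) {
        port->proc_owner = dev;
        /* ... create the per-port proc entry here ... */
    }
}

static void demo_device_unregister(struct demo_port *port, void *dev)
{
    /* Only the device that created the entry tears it down. */
    if (port->proc_owner == dev) {
        port->proc_owner = NULL;
        clear_bit(DEMO_PROC_REGISTERED, &port->flags);
        /* ... remove the per-port proc entry here ... */
    }
}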
@@ -130,6 +127,7 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv) unsigned long long adr, sun; int device, function, retval; struct pci_bus *pbus = bridge->pci_bus; + struct pci_dev *pdev; if (!acpi_pci_check_ejectable(pbus, handle) && !is_dock_device(handle)) return AE_OK; @@ -213,10 +211,10 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv) newfunc->slot = slot; list_add_tail(&newfunc->sibling, &slot->funcs); - /* associate corresponding pci_dev */ - newfunc->pci_dev = pci_get_slot(pbus, PCI_DEVFN(device, function)); - if (newfunc->pci_dev) { + pdev = pci_get_slot(pbus, PCI_DEVFN(device, function)); + if (pdev) { slot->flags |= (SLOT_ENABLED | SLOT_POWEREDON); + pci_dev_put(pdev); } if (is_dock_device(handle)) { @@ -617,7 +615,6 @@ static void cleanup_bridge(struct acpiphp_bridge *bridge) if (ACPI_FAILURE(status)) err("failed to remove notify handler\n"); } - pci_dev_put(func->pci_dev); list_del(list); kfree(func); } @@ -1101,22 +1098,24 @@ static int __ref enable_device(struct acpiphp_slot *slot) pci_enable_bridges(bus); pci_bus_add_devices(bus); - /* associate pci_dev to our representation */ list_for_each (l, &slot->funcs) { func = list_entry(l, struct acpiphp_func, sibling); - func->pci_dev = pci_get_slot(bus, PCI_DEVFN(slot->device, - func->function)); - if (!func->pci_dev) + dev = pci_get_slot(bus, PCI_DEVFN(slot->device, + func->function)); + if (!dev) continue; - if (func->pci_dev->hdr_type != PCI_HEADER_TYPE_BRIDGE && - func->pci_dev->hdr_type != PCI_HEADER_TYPE_CARDBUS) + if (dev->hdr_type != PCI_HEADER_TYPE_BRIDGE && + dev->hdr_type != PCI_HEADER_TYPE_CARDBUS) { + pci_dev_put(dev); continue; + } status = find_p2p_bridge(func->handle, (u32)1, bus, NULL); if (ACPI_FAILURE(status)) warn("find_p2p_bridge failed (error code = 0x%x)\n", status); + pci_dev_put(dev); } slot->flags |= SLOT_ENABLED; @@ -1142,17 +1141,14 @@ static void disable_bridges(struct pci_bus *bus) */ static int disable_device(struct acpiphp_slot *slot) { - int retval = 0; struct acpiphp_func *func; - struct list_head *l; + struct pci_dev *pdev; /* is this slot already disabled? */ if (!(slot->flags & SLOT_ENABLED)) goto err_exit; - list_for_each (l, &slot->funcs) { - func = list_entry(l, struct acpiphp_func, sibling); - + list_for_each_entry(func, &slot->funcs, sibling) { if (func->bridge) { /* cleanup p2p bridges under this P2P bridge */ cleanup_p2p_bridge(func->bridge->handle, @@ -1160,35 +1156,28 @@ static int disable_device(struct acpiphp_slot *slot) func->bridge = NULL; } - if (func->pci_dev) { - pci_stop_bus_device(func->pci_dev); - if (func->pci_dev->subordinate) { - disable_bridges(func->pci_dev->subordinate); - pci_disable_device(func->pci_dev); + pdev = pci_get_slot(slot->bridge->pci_bus, + PCI_DEVFN(slot->device, func->function)); + if (pdev) { + pci_stop_bus_device(pdev); + if (pdev->subordinate) { + disable_bridges(pdev->subordinate); + pci_disable_device(pdev); } + pci_remove_bus_device(pdev); + pci_dev_put(pdev); } } - list_for_each (l, &slot->funcs) { - func = list_entry(l, struct acpiphp_func, sibling); - + list_for_each_entry(func, &slot->funcs, sibling) { acpiphp_unconfigure_ioapics(func->handle); acpiphp_bus_trim(func->handle); - /* try to remove anyway. 
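The acpiphp_glue.c rework drops the long-lived func->pci_dev pointer: the device is looked up with pci_get_slot() where it is needed and released with pci_dev_put() right after use, so the reference no longer outlives its user. A minimal sketch of that get/use/put discipline, with a hypothetical "use" step:

#include <linux/pci.h>

static void demo_poke_slot(struct pci_bus *bus, unsigned int devfn)
{
    struct pci_dev *pdev;

    pdev = pci_get_slot(bus, devfn);    /* takes a reference */
    if (!pdev)
        return;                         /* nothing in that slot */

    /* ... use pdev only while the reference is held ... */

    pci_dev_put(pdev);                  /* drop it as soon as we are done */
}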
- * acpiphp_bus_add might have been failed */ - - if (!func->pci_dev) - continue; - - pci_remove_bus_device(func->pci_dev); - pci_dev_put(func->pci_dev); - func->pci_dev = NULL; } slot->flags &= (~SLOT_ENABLED); - err_exit: - return retval; +err_exit: + return 0; } diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c index 001b328adf8..a563fbe559d 100644 --- a/drivers/pci/intel-iommu.c +++ b/drivers/pci/intel-iommu.c @@ -59,6 +59,10 @@ #define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32)) #define DMA_64BIT_PFN IOVA_PFN(DMA_BIT_MASK(64)) +#ifndef PHYSICAL_PAGE_MASK +#define PHYSICAL_PAGE_MASK PAGE_MASK +#endif + /* global iommu list, set NULL for ignored DMAR units */ static struct intel_iommu **g_iommus; @@ -1216,7 +1220,7 @@ static void dmar_init_reserved_ranges(void) if (!r->flags || !(r->flags & IORESOURCE_MEM)) continue; addr = r->start; - addr &= PAGE_MASK; + addr &= PHYSICAL_PAGE_MASK; size = r->end - addr; size = PAGE_ALIGN(size); iova = reserve_iova(&reserved_iova_list, IOVA_PFN(addr), @@ -2173,7 +2177,8 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr, * is not a big problem */ ret = domain_page_mapping(domain, start_paddr, - ((u64)paddr) & PAGE_MASK, size, prot); + ((u64)paddr) & PHYSICAL_PAGE_MASK, + size, prot); if (ret) goto error; @@ -2463,8 +2468,8 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne addr = page_to_phys(sg_page(sg)) + sg->offset; size = aligned_size((u64)addr, sg->length); ret = domain_page_mapping(domain, start_addr + offset, - ((u64)addr) & PAGE_MASK, - size, prot); + ((u64)addr) & PHYSICAL_PAGE_MASK, + size, prot); if (ret) { /* clear the page */ dma_pte_clear_range(domain, start_addr, diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index 6f2e6295e77..362773247fb 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c @@ -455,8 +455,6 @@ static int msix_capability_init(struct pci_dev *dev, entry->msi_attrib.default_irq = dev->irq; entry->msi_attrib.pos = pos; entry->mask_base = base; - entry->masked = readl(base + j * PCI_MSIX_ENTRY_SIZE + - PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET); msix_mask_irq(entry, 1); list_add_tail(&entry->list, &dev->msi_list); @@ -493,6 +491,12 @@ static int msix_capability_init(struct pci_dev *dev, msix_set_enable(dev, 1); dev->msix_enabled = 1; + list_for_each_entry(entry, &dev->msi_list, list) { + int vector = entry->msi_attrib.entry_nr; + entry->masked = readl(base + vector * PCI_MSIX_ENTRY_SIZE + + PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET); + } + return 0; } diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 34bf0fdf504..1a91bf9687a 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -557,7 +557,8 @@ static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state) } else { error = -ENODEV; /* Fall back to PCI_D0 if native PM is not supported */ - pci_update_current_state(dev, PCI_D0); + if (!dev->pm_cap) + dev->current_state = PCI_D0; } return error; diff --git a/drivers/pci/pcie/aer/aerdrv.h b/drivers/pci/pcie/aer/aerdrv.h index c7ad68b6c6d..aa14482a477 100644 --- a/drivers/pci/pcie/aer/aerdrv.h +++ b/drivers/pci/pcie/aer/aerdrv.h @@ -95,6 +95,9 @@ struct aer_broadcast_data { static inline pci_ers_result_t merge_result(enum pci_ers_result orig, enum pci_ers_result new) { + if (new == PCI_ERS_RESULT_NONE) + return orig; + switch (orig) { case PCI_ERS_RESULT_CAN_RECOVER: case PCI_ERS_RESULT_RECOVERED: diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c index b924e2463f8..091ce70051e 100644 --- 
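The aerdrv.h hunk makes merge_result() treat PCI_ERS_RESULT_NONE as "no opinion", so a driver that does not vote no longer dilutes an otherwise recoverable verdict (the portdrv change further down starts slot reset from PCI_ERS_RESULT_RECOVERED for the same reason). A compressed sketch of the intended severity ordering; this is an illustration, not the kernel's actual switch statement:

#include <linux/pci.h>

/* Illustrative ranking only: NONE is ignored, otherwise the more
 * severe result wins (DISCONNECT > NEED_RESET > CAN_RECOVER/RECOVERED). */
static pci_ers_result_t demo_merge(pci_ers_result_t orig, pci_ers_result_t new)
{
    if (new == PCI_ERS_RESULT_NONE)
        return orig;
    if (orig == PCI_ERS_RESULT_NONE)
        return new;
    if (new == PCI_ERS_RESULT_DISCONNECT ||
        orig == PCI_ERS_RESULT_DISCONNECT)
        return PCI_ERS_RESULT_DISCONNECT;
    if (new == PCI_ERS_RESULT_NEED_RESET ||
        orig == PCI_ERS_RESULT_NEED_RESET)
        return PCI_ERS_RESULT_NEED_RESET;
    return new;
}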
a/drivers/pci/pcie/portdrv_pci.c +++ b/drivers/pci/pcie/portdrv_pci.c @@ -200,7 +200,7 @@ static int slot_reset_iter(struct device *device, void *data) static pci_ers_result_t pcie_portdrv_slot_reset(struct pci_dev *dev) { - pci_ers_result_t status = PCI_ERS_RESULT_NONE; + pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED; int retval; /* If fatal, restore cfg space for possible link reset at upstream */ diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index e3c3e081b83..f1ae2475fff 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -745,6 +745,8 @@ int pci_setup_device(struct pci_dev *dev) /* Early fixups, before probing the BARs */ pci_fixup_device(pci_fixup_early, dev); + /* device class may be changed after fixup */ + class = dev->class >> 8; switch (dev->hdr_type) { /* header type */ case PCI_HEADER_TYPE_NORMAL: /* standard header */ diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c index eeafc6c0160..bfc1a8892a3 100644 --- a/drivers/platform/x86/asus-laptop.c +++ b/drivers/platform/x86/asus-laptop.c @@ -269,16 +269,16 @@ static struct key_entry asus_keymap[] = { {KE_KEY, 0x34, KEY_SWITCHVIDEOMODE}, {KE_KEY, 0x40, KEY_PREVIOUSSONG}, {KE_KEY, 0x41, KEY_NEXTSONG}, - {KE_KEY, 0x43, KEY_STOP}, + {KE_KEY, 0x43, KEY_STOPCD}, {KE_KEY, 0x45, KEY_PLAYPAUSE}, {KE_KEY, 0x50, KEY_EMAIL}, {KE_KEY, 0x51, KEY_WWW}, - {KE_KEY, 0x5C, BTN_EXTRA}, /* Performance */ + {KE_KEY, 0x5C, KEY_SCREENLOCK}, /* Screenlock */ {KE_KEY, 0x5D, KEY_WLAN}, {KE_KEY, 0x61, KEY_SWITCHVIDEOMODE}, {KE_KEY, 0x6B, BTN_TOUCH}, /* Lock Mouse */ {KE_KEY, 0x82, KEY_CAMERA}, - {KE_KEY, 0x8A, KEY_TV}, + {KE_KEY, 0x8A, KEY_PROG1}, {KE_KEY, 0x95, KEY_MEDIA}, {KE_KEY, 0x99, KEY_PHONE}, {KE_END, 0}, diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c index 6f54fd1757c..353a898c369 100644 --- a/drivers/platform/x86/eeepc-laptop.c +++ b/drivers/platform/x86/eeepc-laptop.c @@ -158,6 +158,7 @@ enum { KE_KEY, KE_END }; static struct key_entry eeepc_keymap[] = { /* Sleep already handled via generic ACPI code */ {KE_KEY, 0x10, KEY_WLAN }, + {KE_KEY, 0x11, KEY_WLAN }, {KE_KEY, 0x12, KEY_PROG1 }, {KE_KEY, 0x13, KEY_MUTE }, {KE_KEY, 0x14, KEY_VOLUMEDOWN }, @@ -166,6 +167,8 @@ static struct key_entry eeepc_keymap[] = { {KE_KEY, 0x1b, KEY_ZOOM }, {KE_KEY, 0x1c, KEY_PROG2 }, {KE_KEY, 0x1d, KEY_PROG3 }, + {KE_KEY, NOTIFY_BRN_MIN, KEY_BRIGHTNESSDOWN }, + {KE_KEY, NOTIFY_BRN_MIN + 2, KEY_BRIGHTNESSUP }, {KE_KEY, 0x30, KEY_SWITCHVIDEOMODE }, {KE_KEY, 0x31, KEY_SWITCHVIDEOMODE }, {KE_KEY, 0x32, KEY_SWITCHVIDEOMODE }, @@ -381,11 +384,13 @@ static ssize_t show_sys_acpi(int cm, char *buf) EEEPC_CREATE_DEVICE_ATTR(camera, CM_ASL_CAMERA); EEEPC_CREATE_DEVICE_ATTR(cardr, CM_ASL_CARDREADER); EEEPC_CREATE_DEVICE_ATTR(disp, CM_ASL_DISPLAYSWITCH); +EEEPC_CREATE_DEVICE_ATTR(cpufv, CM_ASL_CPUFV); static struct attribute *platform_attributes[] = { &dev_attr_camera.attr, &dev_attr_cardr.attr, &dev_attr_disp.attr, + &dev_attr_cpufv.attr, NULL }; @@ -512,15 +517,21 @@ static int eeepc_hotk_check(void) return 0; } -static void notify_brn(void) +static int notify_brn(void) { + /* returns the *previous* brightness, or -1 */ struct backlight_device *bd = eeepc_backlight_device; - if (bd) + if (bd) { + int old = bd->props.brightness; bd->props.brightness = read_brightness(bd); + return old; + } + return -1; } static void eeepc_rfkill_notify(acpi_handle handle, u32 event, void *data) { + enum rfkill_state state; struct pci_dev *dev; struct pci_bus *bus = pci_find_bus(0, 1); @@ -532,7 
+543,9 @@ static void eeepc_rfkill_notify(acpi_handle handle, u32 event, void *data) return; } - if (get_acpi(CM_ASL_WLAN) == 1) { + eeepc_wlan_rfkill_state(ehotk->eeepc_wlan_rfkill, &state); + + if (state == RFKILL_STATE_UNBLOCKED) { dev = pci_get_slot(bus, 0); if (dev) { /* Device already present */ @@ -552,23 +565,41 @@ static void eeepc_rfkill_notify(acpi_handle handle, u32 event, void *data) pci_dev_put(dev); } } + + rfkill_force_state(ehotk->eeepc_wlan_rfkill, state); } static void eeepc_hotk_notify(acpi_handle handle, u32 event, void *data) { static struct key_entry *key; u16 count; + int brn = -ENODEV; if (!ehotk) return; if (event >= NOTIFY_BRN_MIN && event <= NOTIFY_BRN_MAX) - notify_brn(); + brn = notify_brn(); count = ehotk->event_count[event % 128]++; acpi_bus_generate_proc_event(ehotk->device, event, count); acpi_bus_generate_netlink_event(ehotk->device->pnp.device_class, dev_name(&ehotk->device->dev), event, count); if (ehotk->inputdev) { + if (brn != -ENODEV) { + /* brightness-change events need special + * handling for conversion to key events + */ + if (brn < 0) + brn = event; + else + brn += NOTIFY_BRN_MIN; + if (event < brn) + event = NOTIFY_BRN_MIN; /* brightness down */ + else if (event > brn) + event = NOTIFY_BRN_MIN + 2; /* ... up */ + else + event = NOTIFY_BRN_MIN + 1; /* ... unchanged */ + } key = eepc_get_entry_by_scancode(event); if (key) { switch (key->type) { @@ -649,6 +680,9 @@ static int eeepc_hotk_add(struct acpi_device *device) if (ACPI_FAILURE(status)) printk(EEEPC_ERR "Error installing notify handler\n"); + eeepc_register_rfkill_notifier("\\_SB.PCI0.P0P6"); + eeepc_register_rfkill_notifier("\\_SB.PCI0.P0P7"); + if (get_acpi(CM_ASL_WLAN) != -1) { ehotk->eeepc_wlan_rfkill = rfkill_allocate(&device->dev, RFKILL_TYPE_WLAN); @@ -704,9 +738,6 @@ static int eeepc_hotk_add(struct acpi_device *device) goto bluetooth_fail; } - eeepc_register_rfkill_notifier("\\_SB.PCI0.P0P6"); - eeepc_register_rfkill_notifier("\\_SB.PCI0.P0P7"); - return 0; bluetooth_fail: @@ -717,6 +748,8 @@ static int eeepc_hotk_add(struct acpi_device *device) wlan_fail: if (ehotk->eeepc_wlan_rfkill) rfkill_free(ehotk->eeepc_wlan_rfkill); + eeepc_unregister_rfkill_notifier("\\_SB.PCI0.P0P6"); + eeepc_unregister_rfkill_notifier("\\_SB.PCI0.P0P7"); ehotk_fail: kfree(ehotk); ehotk = NULL; diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c index 9a3a682c698..9496494f340 100644 --- a/drivers/pnp/pnpacpi/core.c +++ b/drivers/pnp/pnpacpi/core.c @@ -110,11 +110,9 @@ static int pnpacpi_disable_resources(struct pnp_dev *dev) /* acpi_unregister_gsi(pnp_irq(dev, 0)); */ ret = 0; - if (acpi_bus_power_manageable(handle)) { - ret = acpi_bus_set_power(handle, ACPI_STATE_D3); - if (ret) - return ret; - } + if (acpi_bus_power_manageable(handle)) + acpi_bus_set_power(handle, ACPI_STATE_D3); + /* continue even if acpi_bus_set_power() fails */ if (ACPI_FAILURE(acpi_evaluate_object(handle, "_DIS", NULL, NULL))) ret = -ENODEV; return ret; diff --git a/drivers/regulator/da903x.c b/drivers/regulator/da903x.c index 72b15495183..c6628f5a0af 100644 --- a/drivers/regulator/da903x.c +++ b/drivers/regulator/da903x.c @@ -497,7 +497,7 @@ static struct platform_driver da903x_regulator_driver = { .owner = THIS_MODULE, }, .probe = da903x_regulator_probe, - .remove = da903x_regulator_remove, + .remove = __devexit_p(da903x_regulator_remove), }; static int __init da903x_regulator_init(void) diff --git a/drivers/rtc/rtc-pl030.c b/drivers/rtc/rtc-pl030.c index 82615355215..aaf1f75fa29 100644 --- 
a/drivers/rtc/rtc-pl030.c +++ b/drivers/rtc/rtc-pl030.c @@ -102,7 +102,7 @@ static const struct rtc_class_ops pl030_ops = { .set_alarm = pl030_set_alarm, }; -static int pl030_probe(struct amba_device *dev, void *id) +static int pl030_probe(struct amba_device *dev, struct amba_id *id) { struct pl030_rtc *rtc; int ret; diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c index 333eec689d2..451fc13784d 100644 --- a/drivers/rtc/rtc-pl031.c +++ b/drivers/rtc/rtc-pl031.c @@ -127,7 +127,7 @@ static int pl031_remove(struct amba_device *adev) return 0; } -static int pl031_probe(struct amba_device *adev, void *id) +static int pl031_probe(struct amba_device *adev, struct amba_id *id) { int ret; struct pl031_local *ldata; diff --git a/drivers/rtc/rtc-twl4030.c b/drivers/rtc/rtc-twl4030.c index a6341e4f9a0..9c8c70c497d 100644 --- a/drivers/rtc/rtc-twl4030.c +++ b/drivers/rtc/rtc-twl4030.c @@ -495,9 +495,7 @@ static int twl4030_rtc_suspend(struct platform_device *pdev, pm_message_t state) { irqstat = rtc_irq_bits; - /* REVISIT alarm may need to wake us from sleep */ - mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_TIMER_M | - BIT_RTC_INTERRUPTS_REG_IT_ALARM_M); + mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_TIMER_M); return 0; } diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c index 8b7983aba8f..36c21b19e5d 100644 --- a/drivers/scsi/3w-9xxx.c +++ b/drivers/scsi/3w-9xxx.c @@ -1978,7 +1978,8 @@ static void twa_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id) { struct scsi_cmnd *cmd = tw_dev->srb[request_id]; - scsi_dma_unmap(cmd); + if (cmd->SCp.phase == TW_PHASE_SGLIST) + scsi_dma_unmap(cmd); } /* End twa_unmap_scsi_data() */ /* scsi_host_template initializer */ diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c index c03f1d2c9e2..faa0fcfed71 100644 --- a/drivers/scsi/3w-xxxx.c +++ b/drivers/scsi/3w-xxxx.c @@ -6,7 +6,7 @@ Arnaldo Carvalho de Melo Brad Strand - Copyright (C) 1999-2007 3ware Inc. + Copyright (C) 1999-2009 3ware Inc. Kernel compatiblity By: Andre Hedrick Non-Copyright (C) 2000 Andre Hedrick @@ -1294,7 +1294,8 @@ static void tw_unmap_scsi_data(struct pci_dev *pdev, struct scsi_cmnd *cmd) { dprintk(KERN_WARNING "3w-xxxx: tw_unmap_scsi_data()\n"); - scsi_dma_unmap(cmd); + if (cmd->SCp.phase == TW_PHASE_SGLIST) + scsi_dma_unmap(cmd); } /* End tw_unmap_scsi_data() */ /* This function will reset a device extension */ diff --git a/drivers/scsi/3w-xxxx.h b/drivers/scsi/3w-xxxx.h index 8e71e5e122b..a5a2ba2561d 100644 --- a/drivers/scsi/3w-xxxx.h +++ b/drivers/scsi/3w-xxxx.h @@ -6,7 +6,7 @@ Arnaldo Carvalho de Melo Brad Strand - Copyright (C) 1999-2007 3ware Inc. + Copyright (C) 1999-2009 3ware Inc. Kernel compatiblity By: Andre Hedrick Non-Copyright (C) 2000 Andre Hedrick diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index 8ed2990c826..fb2740789b6 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -628,6 +628,17 @@ config FCOE ---help--- Fibre Channel over Ethernet module +config FCOE_FNIC + tristate "Cisco FNIC Driver" + depends on PCI && X86 + select LIBFC + help + This is support for the Cisco PCI-Express FCoE HBA. + + To compile this driver as a module, choose M here and read + . + The module will be called fnic. 
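The 3w-9xxx/3w-xxxx hunks call scsi_dma_unmap() only when cmd->SCp.phase records that the command was really mapped (TW_PHASE_SGLIST), so commands that never set up a scatterlist are not unmapped. A minimal sketch of tracking a "mapped" flag next to the mapping calls, with hypothetical names:

#include <linux/dma-mapping.h>
#include <linux/types.h>
#include <linux/errno.h>

struct demo_cmd {
    bool mapped;        /* set only after a successful map */
    dma_addr_t handle;
    unsigned int len;
};

static int demo_map_cmd(struct device *dev, struct demo_cmd *c, void *buf)
{
    c->handle = dma_map_single(dev, buf, c->len, DMA_TO_DEVICE);
    if (dma_mapping_error(dev, c->handle))
        return -ENOMEM;

    c->mapped = true;
    return 0;
}

static void demo_unmap_cmd(struct device *dev, struct demo_cmd *c)
{
    if (!c->mapped)     /* never mapped, nothing to undo */
        return;

    dma_unmap_single(dev, c->handle, c->len, DMA_TO_DEVICE);
    c->mapped = false;
}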
+ config SCSI_DMX3191D tristate "DMX3191D SCSI support" depends on PCI && SCSI diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile index e7c861ac417..a5049cfb40e 100644 --- a/drivers/scsi/Makefile +++ b/drivers/scsi/Makefile @@ -39,6 +39,7 @@ obj-$(CONFIG_SCSI_DH) += device_handler/ obj-$(CONFIG_LIBFC) += libfc/ obj-$(CONFIG_LIBFCOE) += fcoe/ obj-$(CONFIG_FCOE) += fcoe/ +obj-$(CONFIG_FCOE_FNIC) += fnic/ obj-$(CONFIG_ISCSI_TCP) += libiscsi.o libiscsi_tcp.o iscsi_tcp.o obj-$(CONFIG_INFINIBAND_ISER) += libiscsi.o obj-$(CONFIG_SCSI_A4000T) += 53c700.o a4000t.o diff --git a/drivers/scsi/fnic/Makefile b/drivers/scsi/fnic/Makefile new file mode 100644 index 00000000000..37c3440bc17 --- /dev/null +++ b/drivers/scsi/fnic/Makefile @@ -0,0 +1,15 @@ +obj-$(CONFIG_FCOE_FNIC) += fnic.o + +fnic-y := \ + fnic_attrs.o \ + fnic_isr.o \ + fnic_main.o \ + fnic_res.o \ + fnic_fcs.o \ + fnic_scsi.o \ + vnic_cq.o \ + vnic_dev.o \ + vnic_intr.o \ + vnic_rq.o \ + vnic_wq_copy.o \ + vnic_wq.o diff --git a/drivers/scsi/fnic/cq_desc.h b/drivers/scsi/fnic/cq_desc.h new file mode 100644 index 00000000000..d1225cf6320 --- /dev/null +++ b/drivers/scsi/fnic/cq_desc.h @@ -0,0 +1,78 @@ +/* + * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef _CQ_DESC_H_ +#define _CQ_DESC_H_ + +/* + * Completion queue descriptor types + */ +enum cq_desc_types { + CQ_DESC_TYPE_WQ_ENET = 0, + CQ_DESC_TYPE_DESC_COPY = 1, + CQ_DESC_TYPE_WQ_EXCH = 2, + CQ_DESC_TYPE_RQ_ENET = 3, + CQ_DESC_TYPE_RQ_FCP = 4, +}; + +/* Completion queue descriptor: 16B + * + * All completion queues have this basic layout. The + * type_specfic area is unique for each completion + * queue type. + */ +struct cq_desc { + __le16 completed_index; + __le16 q_number; + u8 type_specfic[11]; + u8 type_color; +}; + +#define CQ_DESC_TYPE_BITS 4 +#define CQ_DESC_TYPE_MASK ((1 << CQ_DESC_TYPE_BITS) - 1) +#define CQ_DESC_COLOR_MASK 1 +#define CQ_DESC_COLOR_SHIFT 7 +#define CQ_DESC_Q_NUM_BITS 10 +#define CQ_DESC_Q_NUM_MASK ((1 << CQ_DESC_Q_NUM_BITS) - 1) +#define CQ_DESC_COMP_NDX_BITS 12 +#define CQ_DESC_COMP_NDX_MASK ((1 << CQ_DESC_COMP_NDX_BITS) - 1) + +static inline void cq_desc_dec(const struct cq_desc *desc_arg, + u8 *type, u8 *color, u16 *q_number, u16 *completed_index) +{ + const struct cq_desc *desc = desc_arg; + const u8 type_color = desc->type_color; + + *color = (type_color >> CQ_DESC_COLOR_SHIFT) & CQ_DESC_COLOR_MASK; + + /* + * Make sure color bit is read from desc *before* other fields + * are read from desc. Hardware guarantees color bit is last + * bit (byte) written. Adding the rmb() prevents the compiler + * and/or CPU from reordering the reads which would potentially + * result in reading stale values. 
+ */ + + rmb(); + + *type = type_color & CQ_DESC_TYPE_MASK; + *q_number = le16_to_cpu(desc->q_number) & CQ_DESC_Q_NUM_MASK; + *completed_index = le16_to_cpu(desc->completed_index) & + CQ_DESC_COMP_NDX_MASK; +} + +#endif /* _CQ_DESC_H_ */ diff --git a/drivers/scsi/fnic/cq_enet_desc.h b/drivers/scsi/fnic/cq_enet_desc.h new file mode 100644 index 00000000000..a9fa26f82dd --- /dev/null +++ b/drivers/scsi/fnic/cq_enet_desc.h @@ -0,0 +1,167 @@ +/* + * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef _CQ_ENET_DESC_H_ +#define _CQ_ENET_DESC_H_ + +#include "cq_desc.h" + +/* Ethernet completion queue descriptor: 16B */ +struct cq_enet_wq_desc { + __le16 completed_index; + __le16 q_number; + u8 reserved[11]; + u8 type_color; +}; + +static inline void cq_enet_wq_desc_dec(struct cq_enet_wq_desc *desc, + u8 *type, u8 *color, u16 *q_number, u16 *completed_index) +{ + cq_desc_dec((struct cq_desc *)desc, type, + color, q_number, completed_index); +} + +/* Completion queue descriptor: Ethernet receive queue, 16B */ +struct cq_enet_rq_desc { + __le16 completed_index_flags; + __le16 q_number_rss_type_flags; + __le32 rss_hash; + __le16 bytes_written_flags; + __le16 vlan; + __le16 checksum_fcoe; + u8 flags; + u8 type_color; +}; + +#define CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT (0x1 << 12) +#define CQ_ENET_RQ_DESC_FLAGS_FCOE (0x1 << 13) +#define CQ_ENET_RQ_DESC_FLAGS_EOP (0x1 << 14) +#define CQ_ENET_RQ_DESC_FLAGS_SOP (0x1 << 15) + +#define CQ_ENET_RQ_DESC_RSS_TYPE_BITS 4 +#define CQ_ENET_RQ_DESC_RSS_TYPE_MASK \ + ((1 << CQ_ENET_RQ_DESC_RSS_TYPE_BITS) - 1) +#define CQ_ENET_RQ_DESC_RSS_TYPE_NONE 0 +#define CQ_ENET_RQ_DESC_RSS_TYPE_IPv4 1 +#define CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv4 2 +#define CQ_ENET_RQ_DESC_RSS_TYPE_IPv6 3 +#define CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6 4 +#define CQ_ENET_RQ_DESC_RSS_TYPE_IPv6_EX 5 +#define CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6_EX 6 + +#define CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC (0x1 << 14) + +#define CQ_ENET_RQ_DESC_BYTES_WRITTEN_BITS 14 +#define CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK \ + ((1 << CQ_ENET_RQ_DESC_BYTES_WRITTEN_BITS) - 1) +#define CQ_ENET_RQ_DESC_FLAGS_TRUNCATED (0x1 << 14) +#define CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED (0x1 << 15) + +#define CQ_ENET_RQ_DESC_FCOE_SOF_BITS 4 +#define CQ_ENET_RQ_DESC_FCOE_SOF_MASK \ + ((1 << CQ_ENET_RQ_DESC_FCOE_SOF_BITS) - 1) +#define CQ_ENET_RQ_DESC_FCOE_EOF_BITS 8 +#define CQ_ENET_RQ_DESC_FCOE_EOF_MASK \ + ((1 << CQ_ENET_RQ_DESC_FCOE_EOF_BITS) - 1) +#define CQ_ENET_RQ_DESC_FCOE_EOF_SHIFT 8 + +#define CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK (0x1 << 0) +#define CQ_ENET_RQ_DESC_FCOE_FC_CRC_OK (0x1 << 0) +#define CQ_ENET_RQ_DESC_FLAGS_UDP (0x1 << 1) +#define CQ_ENET_RQ_DESC_FCOE_ENC_ERROR (0x1 << 1) +#define CQ_ENET_RQ_DESC_FLAGS_TCP (0x1 << 2) +#define 
CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK (0x1 << 3) +#define CQ_ENET_RQ_DESC_FLAGS_IPV6 (0x1 << 4) +#define CQ_ENET_RQ_DESC_FLAGS_IPV4 (0x1 << 5) +#define CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT (0x1 << 6) +#define CQ_ENET_RQ_DESC_FLAGS_FCS_OK (0x1 << 7) + +static inline void cq_enet_rq_desc_dec(struct cq_enet_rq_desc *desc, + u8 *type, u8 *color, u16 *q_number, u16 *completed_index, + u8 *ingress_port, u8 *fcoe, u8 *eop, u8 *sop, u8 *rss_type, + u8 *csum_not_calc, u32 *rss_hash, u16 *bytes_written, u8 *packet_error, + u8 *vlan_stripped, u16 *vlan, u16 *checksum, u8 *fcoe_sof, + u8 *fcoe_fc_crc_ok, u8 *fcoe_enc_error, u8 *fcoe_eof, + u8 *tcp_udp_csum_ok, u8 *udp, u8 *tcp, u8 *ipv4_csum_ok, + u8 *ipv6, u8 *ipv4, u8 *ipv4_fragment, u8 *fcs_ok) +{ + u16 completed_index_flags = le16_to_cpu(desc->completed_index_flags); + u16 q_number_rss_type_flags = + le16_to_cpu(desc->q_number_rss_type_flags); + u16 bytes_written_flags = le16_to_cpu(desc->bytes_written_flags); + + cq_desc_dec((struct cq_desc *)desc, type, + color, q_number, completed_index); + + *ingress_port = (completed_index_flags & + CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT) ? 1 : 0; + *fcoe = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_FCOE) ? + 1 : 0; + *eop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_EOP) ? + 1 : 0; + *sop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_SOP) ? + 1 : 0; + + *rss_type = (u8)((q_number_rss_type_flags >> CQ_DESC_Q_NUM_BITS) & + CQ_ENET_RQ_DESC_RSS_TYPE_MASK); + *csum_not_calc = (q_number_rss_type_flags & + CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ? 1 : 0; + + *rss_hash = le32_to_cpu(desc->rss_hash); + + *bytes_written = bytes_written_flags & + CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK; + *packet_error = (bytes_written_flags & + CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ? 1 : 0; + *vlan_stripped = (bytes_written_flags & + CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) ? 1 : 0; + + *vlan = le16_to_cpu(desc->vlan); + + if (*fcoe) { + *fcoe_sof = (u8)(le16_to_cpu(desc->checksum_fcoe) & + CQ_ENET_RQ_DESC_FCOE_SOF_MASK); + *fcoe_fc_crc_ok = (desc->flags & + CQ_ENET_RQ_DESC_FCOE_FC_CRC_OK) ? 1 : 0; + *fcoe_enc_error = (desc->flags & + CQ_ENET_RQ_DESC_FCOE_ENC_ERROR) ? 1 : 0; + *fcoe_eof = (u8)((desc->checksum_fcoe >> + CQ_ENET_RQ_DESC_FCOE_EOF_SHIFT) & + CQ_ENET_RQ_DESC_FCOE_EOF_MASK); + *checksum = 0; + } else { + *fcoe_sof = 0; + *fcoe_fc_crc_ok = 0; + *fcoe_enc_error = 0; + *fcoe_eof = 0; + *checksum = le16_to_cpu(desc->checksum_fcoe); + } + + *tcp_udp_csum_ok = + (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ? 1 : 0; + *udp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_UDP) ? 1 : 0; + *tcp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP) ? 1 : 0; + *ipv4_csum_ok = + (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ? 1 : 0; + *ipv6 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV6) ? 1 : 0; + *ipv4 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4) ? 1 : 0; + *ipv4_fragment = + (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT) ? 1 : 0; + *fcs_ok = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_FCS_OK) ? 1 : 0; +} + +#endif /* _CQ_ENET_DESC_H_ */ diff --git a/drivers/scsi/fnic/cq_exch_desc.h b/drivers/scsi/fnic/cq_exch_desc.h new file mode 100644 index 00000000000..501660cfe22 --- /dev/null +++ b/drivers/scsi/fnic/cq_exch_desc.h @@ -0,0 +1,182 @@ +/* + * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. 
+ * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef _CQ_EXCH_DESC_H_ +#define _CQ_EXCH_DESC_H_ + +#include "cq_desc.h" + +/* Exchange completion queue descriptor: 16B */ +struct cq_exch_wq_desc { + u16 completed_index; + u16 q_number; + u16 exchange_id; + u8 tmpl; + u8 reserved0; + u32 reserved1; + u8 exch_status; + u8 reserved2[2]; + u8 type_color; +}; + +#define CQ_EXCH_WQ_STATUS_BITS 2 +#define CQ_EXCH_WQ_STATUS_MASK ((1 << CQ_EXCH_WQ_STATUS_BITS) - 1) + +enum cq_exch_status_types { + CQ_EXCH_WQ_STATUS_TYPE_COMPLETE = 0, + CQ_EXCH_WQ_STATUS_TYPE_ABORT = 1, + CQ_EXCH_WQ_STATUS_TYPE_SGL_EOF = 2, + CQ_EXCH_WQ_STATUS_TYPE_TMPL_ERR = 3, +}; + +static inline void cq_exch_wq_desc_dec(struct cq_exch_wq_desc *desc_ptr, + u8 *type, + u8 *color, + u16 *q_number, + u16 *completed_index, + u8 *exch_status) +{ + cq_desc_dec((struct cq_desc *)desc_ptr, type, + color, q_number, completed_index); + *exch_status = desc_ptr->exch_status & CQ_EXCH_WQ_STATUS_MASK; +} + +struct cq_fcp_rq_desc { + u16 completed_index_eop_sop_prt; + u16 q_number; + u16 exchange_id; + u16 tmpl; + u16 bytes_written; + u16 vlan; + u8 sof; + u8 eof; + u8 fcs_fer_fck; + u8 type_color; +}; + +#define CQ_FCP_RQ_DESC_FLAGS_SOP (1 << 15) +#define CQ_FCP_RQ_DESC_FLAGS_EOP (1 << 14) +#define CQ_FCP_RQ_DESC_FLAGS_PRT (1 << 12) +#define CQ_FCP_RQ_DESC_TMPL_MASK 0x1f +#define CQ_FCP_RQ_DESC_BYTES_WRITTEN_MASK 0x3fff +#define CQ_FCP_RQ_DESC_PACKET_ERR_SHIFT 14 +#define CQ_FCP_RQ_DESC_PACKET_ERR_MASK (1 << CQ_FCP_RQ_DESC_PACKET_ERR_SHIFT) +#define CQ_FCP_RQ_DESC_VS_STRIPPED_SHIFT 15 +#define CQ_FCP_RQ_DESC_VS_STRIPPED_MASK (1 << CQ_FCP_RQ_DESC_VS_STRIPPED_SHIFT) +#define CQ_FCP_RQ_DESC_FC_CRC_OK_MASK 0x1 +#define CQ_FCP_RQ_DESC_FCOE_ERR_SHIFT 1 +#define CQ_FCP_RQ_DESC_FCOE_ERR_MASK (1 << CQ_FCP_RQ_DESC_FCOE_ERR_SHIFT) +#define CQ_FCP_RQ_DESC_FCS_OK_SHIFT 7 +#define CQ_FCP_RQ_DESC_FCS_OK_MASK (1 << CQ_FCP_RQ_DESC_FCS_OK_SHIFT) + +static inline void cq_fcp_rq_desc_dec(struct cq_fcp_rq_desc *desc_ptr, + u8 *type, + u8 *color, + u16 *q_number, + u16 *completed_index, + u8 *eop, + u8 *sop, + u8 *fck, + u16 *exchange_id, + u16 *tmpl, + u32 *bytes_written, + u8 *sof, + u8 *eof, + u8 *ingress_port, + u8 *packet_err, + u8 *fcoe_err, + u8 *fcs_ok, + u8 *vlan_stripped, + u16 *vlan) +{ + cq_desc_dec((struct cq_desc *)desc_ptr, type, + color, q_number, completed_index); + *eop = (desc_ptr->completed_index_eop_sop_prt & + CQ_FCP_RQ_DESC_FLAGS_EOP) ? 1 : 0; + *sop = (desc_ptr->completed_index_eop_sop_prt & + CQ_FCP_RQ_DESC_FLAGS_SOP) ? 1 : 0; + *ingress_port = + (desc_ptr->completed_index_eop_sop_prt & + CQ_FCP_RQ_DESC_FLAGS_PRT) ? 
1 : 0; + *exchange_id = desc_ptr->exchange_id; + *tmpl = desc_ptr->tmpl & CQ_FCP_RQ_DESC_TMPL_MASK; + *bytes_written = + desc_ptr->bytes_written & CQ_FCP_RQ_DESC_BYTES_WRITTEN_MASK; + *packet_err = + (desc_ptr->bytes_written & CQ_FCP_RQ_DESC_PACKET_ERR_MASK) >> + CQ_FCP_RQ_DESC_PACKET_ERR_SHIFT; + *vlan_stripped = + (desc_ptr->bytes_written & CQ_FCP_RQ_DESC_VS_STRIPPED_MASK) >> + CQ_FCP_RQ_DESC_VS_STRIPPED_SHIFT; + *vlan = desc_ptr->vlan; + *sof = desc_ptr->sof; + *fck = desc_ptr->fcs_fer_fck & CQ_FCP_RQ_DESC_FC_CRC_OK_MASK; + *fcoe_err = (desc_ptr->fcs_fer_fck & CQ_FCP_RQ_DESC_FCOE_ERR_MASK) >> + CQ_FCP_RQ_DESC_FCOE_ERR_SHIFT; + *eof = desc_ptr->eof; + *fcs_ok = + (desc_ptr->fcs_fer_fck & CQ_FCP_RQ_DESC_FCS_OK_MASK) >> + CQ_FCP_RQ_DESC_FCS_OK_SHIFT; +} + +struct cq_sgl_desc { + u16 exchange_id; + u16 q_number; + u32 active_burst_offset; + u32 tot_data_bytes; + u16 tmpl; + u8 sgl_err; + u8 type_color; +}; + +enum cq_sgl_err_types { + CQ_SGL_ERR_NO_ERROR = 0, + CQ_SGL_ERR_OVERFLOW, /* data ran beyond end of SGL */ + CQ_SGL_ERR_SGL_LCL_ADDR_ERR, /* sgl access to local vnic addr illegal*/ + CQ_SGL_ERR_ADDR_RSP_ERR, /* sgl address error */ + CQ_SGL_ERR_DATA_RSP_ERR, /* sgl data rsp error */ + CQ_SGL_ERR_CNT_ZERO_ERR, /* SGL count is 0 */ + CQ_SGL_ERR_CNT_MAX_ERR, /* SGL count is larger than supported */ + CQ_SGL_ERR_ORDER_ERR, /* frames recv on both ports, order err */ + CQ_SGL_ERR_DATA_LCL_ADDR_ERR,/* sgl data buf to local vnic addr ill */ + CQ_SGL_ERR_HOST_CQ_ERR, /* host cq entry to local vnic addr ill */ +}; + +#define CQ_SGL_SGL_ERR_MASK 0x1f +#define CQ_SGL_TMPL_MASK 0x1f + +static inline void cq_sgl_desc_dec(struct cq_sgl_desc *desc_ptr, + u8 *type, + u8 *color, + u16 *q_number, + u16 *exchange_id, + u32 *active_burst_offset, + u32 *tot_data_bytes, + u16 *tmpl, + u8 *sgl_err) +{ + /* Cheat a little by assuming exchange_id is the same as completed + index */ + cq_desc_dec((struct cq_desc *)desc_ptr, type, color, q_number, + exchange_id); + *active_burst_offset = desc_ptr->active_burst_offset; + *tot_data_bytes = desc_ptr->tot_data_bytes; + *tmpl = desc_ptr->tmpl & CQ_SGL_TMPL_MASK; + *sgl_err = desc_ptr->sgl_err & CQ_SGL_SGL_ERR_MASK; +} + +#endif /* _CQ_EXCH_DESC_H_ */ diff --git a/drivers/scsi/fnic/fcpio.h b/drivers/scsi/fnic/fcpio.h new file mode 100644 index 00000000000..12d770d885c --- /dev/null +++ b/drivers/scsi/fnic/fcpio.h @@ -0,0 +1,780 @@ +/* + * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef _FCPIO_H_ +#define _FCPIO_H_ + +#include + +/* + * This header file includes all of the data structures used for + * communication by the host driver to the fcp firmware. 
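 *
 * Rough outline (summary only; the structures below are authoritative):
 * the host posts fixed-size 128-byte fcpio_host_req entries to the
 * firmware and the firmware returns fixed-size 64-byte fcpio_fw_req
 * entries, with the tag in each request header copied back into the
 * corresponding response so the host can match them up.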
+ */ + +/* + * Exchange and sequence id space allocated to the host driver + */ +#define FCPIO_HOST_EXCH_RANGE_START 0x1000 +#define FCPIO_HOST_EXCH_RANGE_END 0x1fff +#define FCPIO_HOST_SEQ_ID_RANGE_START 0x80 +#define FCPIO_HOST_SEQ_ID_RANGE_END 0xff + +/* + * Command entry type + */ +enum fcpio_type { + /* + * Initiator request types + */ + FCPIO_ICMND_16 = 0x1, + FCPIO_ICMND_32, + FCPIO_ICMND_CMPL, + FCPIO_ITMF, + FCPIO_ITMF_CMPL, + + /* + * Target request types + */ + FCPIO_TCMND_16 = 0x11, + FCPIO_TCMND_32, + FCPIO_TDATA, + FCPIO_TXRDY, + FCPIO_TRSP, + FCPIO_TDRSP_CMPL, + FCPIO_TTMF, + FCPIO_TTMF_ACK, + FCPIO_TABORT, + FCPIO_TABORT_CMPL, + + /* + * Misc request types + */ + FCPIO_ACK = 0x20, + FCPIO_RESET, + FCPIO_RESET_CMPL, + FCPIO_FLOGI_REG, + FCPIO_FLOGI_REG_CMPL, + FCPIO_ECHO, + FCPIO_ECHO_CMPL, + FCPIO_LUNMAP_CHNG, + FCPIO_LUNMAP_REQ, + FCPIO_LUNMAP_REQ_CMPL, + FCPIO_FLOGI_FIP_REG, + FCPIO_FLOGI_FIP_REG_CMPL, +}; + +/* + * Header status codes from the firmware + */ +enum fcpio_status { + FCPIO_SUCCESS = 0, /* request was successful */ + + /* + * If a request to the firmware is rejected, the original request + * header will be returned with the status set to one of the following: + */ + FCPIO_INVALID_HEADER, /* header contains invalid data */ + FCPIO_OUT_OF_RESOURCE, /* out of resources to complete request */ + FCPIO_INVALID_PARAM, /* some parameter in request is invalid */ + FCPIO_REQ_NOT_SUPPORTED, /* request type is not supported */ + FCPIO_IO_NOT_FOUND, /* requested I/O was not found */ + + /* + * Once a request is processed, the firmware will usually return + * a cmpl message type. In cases where errors occurred, + * the header status field would be filled in with one of the following: + */ + FCPIO_ABORTED = 0x41, /* request was aborted */ + FCPIO_TIMEOUT, /* request was timed out */ + FCPIO_SGL_INVALID, /* request was aborted due to sgl error */ + FCPIO_MSS_INVALID, /* request was aborted due to mss error */ + FCPIO_DATA_CNT_MISMATCH, /* recv/sent more/less data than exp. */ + FCPIO_FW_ERR, /* request was terminated due to fw error */ + FCPIO_ITMF_REJECTED, /* itmf req was rejected by remote node */ + FCPIO_ITMF_FAILED, /* itmf req was failed by remote node */ + FCPIO_ITMF_INCORRECT_LUN, /* itmf req targeted incorrect LUN */ + FCPIO_CMND_REJECTED, /* request was invalid and rejected */ + FCPIO_NO_PATH_AVAIL, /* no paths to the lun was available */ + FCPIO_PATH_FAILED, /* i/o sent to current path failed */ + FCPIO_LUNMAP_CHNG_PEND, /* i/o rejected due to lunmap change */ +}; + +/* + * The header command tag. All host requests will use the "tag" field + * to mark commands with a unique tag. When the firmware responds to + * a host request, it will copy the tag field into the response. + * + * The only firmware requests that will use the rx_id/ox_id fields instead + * of the tag field will be the target command and target task management + * requests. These two requests do not have corresponding host requests + * since they come directly from the FC initiator on the network. 
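 *
 * Minimal usage sketch (illustrative only; io_req_id is a hypothetical
 * u32 identifier chosen by the driver):
 *
 *	struct fcpio_tag tag;
 *	u8 type, hdr_status;
 *
 *	fcpio_tag_id_enc(&tag, io_req_id);
 *	fcpio_header_enc(&req->hdr, FCPIO_ICMND_16, 0, tag);
 *
 * and, when the matching response arrives from the firmware:
 *
 *	fcpio_header_dec(&fw_req->hdr, &type, &hdr_status, &tag);
 *	fcpio_tag_id_dec(&tag, &io_req_id);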
+ */ +struct fcpio_tag { + union { + u32 req_id; + struct { + u16 rx_id; + u16 ox_id; + } ex_id; + } u; +}; + +static inline void +fcpio_tag_id_enc(struct fcpio_tag *tag, u32 id) +{ + tag->u.req_id = id; +} + +static inline void +fcpio_tag_id_dec(struct fcpio_tag *tag, u32 *id) +{ + *id = tag->u.req_id; +} + +static inline void +fcpio_tag_exid_enc(struct fcpio_tag *tag, u16 ox_id, u16 rx_id) +{ + tag->u.ex_id.rx_id = rx_id; + tag->u.ex_id.ox_id = ox_id; +} + +static inline void +fcpio_tag_exid_dec(struct fcpio_tag *tag, u16 *ox_id, u16 *rx_id) +{ + *rx_id = tag->u.ex_id.rx_id; + *ox_id = tag->u.ex_id.ox_id; +} + +/* + * The header for an fcpio request, whether from the firmware or from the + * host driver + */ +struct fcpio_header { + u8 type; /* enum fcpio_type */ + u8 status; /* header status entry */ + u16 _resvd; /* reserved */ + struct fcpio_tag tag; /* header tag */ +}; + +static inline void +fcpio_header_enc(struct fcpio_header *hdr, + u8 type, u8 status, + struct fcpio_tag tag) +{ + hdr->type = type; + hdr->status = status; + hdr->_resvd = 0; + hdr->tag = tag; +} + +static inline void +fcpio_header_dec(struct fcpio_header *hdr, + u8 *type, u8 *status, + struct fcpio_tag *tag) +{ + *type = hdr->type; + *status = hdr->status; + *tag = hdr->tag; +} + +#define CDB_16 16 +#define CDB_32 32 +#define LUN_ADDRESS 8 + +/* + * fcpio_icmnd_16: host -> firmware request + * + * used for sending out an initiator SCSI 16-byte command + */ +struct fcpio_icmnd_16 { + u32 lunmap_id; /* index into lunmap table */ + u8 special_req_flags; /* special exchange request flags */ + u8 _resvd0[3]; /* reserved */ + u32 sgl_cnt; /* scatter-gather list count */ + u32 sense_len; /* sense buffer length */ + u64 sgl_addr; /* scatter-gather list addr */ + u64 sense_addr; /* sense buffer address */ + u8 crn; /* SCSI Command Reference No. */ + u8 pri_ta; /* SCSI Priority and Task attribute */ + u8 _resvd1; /* reserved: should be 0 */ + u8 flags; /* command flags */ + u8 scsi_cdb[CDB_16]; /* SCSI Cmnd Descriptor Block */ + u32 data_len; /* length of data expected */ + u8 lun[LUN_ADDRESS]; /* FC vNIC only: LUN address */ + u8 _resvd2; /* reserved */ + u8 d_id[3]; /* FC vNIC only: Target D_ID */ + u16 mss; /* FC vNIC only: max burst */ + u16 _resvd3; /* reserved */ + u32 r_a_tov; /* FC vNIC only: Res. Alloc Timeout */ + u32 e_d_tov; /* FC vNIC only: Err Detect Timeout */ +}; + +/* + * Special request flags + */ +#define FCPIO_ICMND_SRFLAG_RETRY 0x01 /* Enable Retry handling on exchange */ + +/* + * Priority/Task Attribute settings + */ +#define FCPIO_ICMND_PTA_SIMPLE 0 /* simple task attribute */ +#define FCPIO_ICMND_PTA_HEADQ 1 /* head of queue task attribute */ +#define FCPIO_ICMND_PTA_ORDERED 2 /* ordered task attribute */ +#define FCPIO_ICMND_PTA_ACA 4 /* auto contingent allegiance */ +#define FCPIO_ICMND_PRI_SHIFT 3 /* priority field starts in bit 3 */ + +/* + * Command flags + */ +#define FCPIO_ICMND_RDDATA 0x02 /* read data */ +#define FCPIO_ICMND_WRDATA 0x01 /* write data */ + +/* + * fcpio_icmnd_32: host -> firmware request + * + * used for sending out an initiator SCSI 32-byte command + */ +struct fcpio_icmnd_32 { + u32 lunmap_id; /* index into lunmap table */ + u8 special_req_flags; /* special exchange request flags */ + u8 _resvd0[3]; /* reserved */ + u32 sgl_cnt; /* scatter-gather list count */ + u32 sense_len; /* sense buffer length */ + u64 sgl_addr; /* scatter-gather list addr */ + u64 sense_addr; /* sense buffer address */ + u8 crn; /* SCSI Command Reference No. 
*/ + u8 pri_ta; /* SCSI Priority and Task attribute */ + u8 _resvd1; /* reserved: should be 0 */ + u8 flags; /* command flags */ + u8 scsi_cdb[CDB_32]; /* SCSI Cmnd Descriptor Block */ + u32 data_len; /* length of data expected */ + u8 lun[LUN_ADDRESS]; /* FC vNIC only: LUN address */ + u8 _resvd2; /* reserved */ + u8 d_id[3]; /* FC vNIC only: Target D_ID */ + u16 mss; /* FC vNIC only: max burst */ + u16 _resvd3; /* reserved */ + u32 r_a_tov; /* FC vNIC only: Res. Alloc Timeout */ + u32 e_d_tov; /* FC vNIC only: Error Detect Timeout */ +}; + +/* + * fcpio_itmf: host -> firmware request + * + * used for requesting the firmware to abort a request and/or send out + * a task management function + * + * The t_tag field is only needed when the request type is ABT_TASK. + */ +struct fcpio_itmf { + u32 lunmap_id; /* index into lunmap table */ + u32 tm_req; /* SCSI Task Management request */ + u32 t_tag; /* header tag of fcpio to be aborted */ + u32 _resvd; /* _reserved */ + u8 lun[LUN_ADDRESS]; /* FC vNIC only: LUN address */ + u8 _resvd1; /* reserved */ + u8 d_id[3]; /* FC vNIC only: Target D_ID */ + u32 r_a_tov; /* FC vNIC only: R_A_TOV in msec */ + u32 e_d_tov; /* FC vNIC only: E_D_TOV in msec */ +}; + +/* + * Task Management request + */ +enum fcpio_itmf_tm_req_type { + FCPIO_ITMF_ABT_TASK_TERM = 0x01, /* abort task and terminate */ + FCPIO_ITMF_ABT_TASK, /* abort task and issue abts */ + FCPIO_ITMF_ABT_TASK_SET, /* abort task set */ + FCPIO_ITMF_CLR_TASK_SET, /* clear task set */ + FCPIO_ITMF_LUN_RESET, /* logical unit reset task mgmt */ + FCPIO_ITMF_CLR_ACA, /* Clear ACA condition */ +}; + +/* + * fcpio_tdata: host -> firmware request + * + * used for requesting the firmware to send out a read data transfer for a + * target command + */ +struct fcpio_tdata { + u16 rx_id; /* FC rx_id of target command */ + u16 flags; /* command flags */ + u32 rel_offset; /* data sequence relative offset */ + u32 sgl_cnt; /* scatter-gather list count */ + u32 data_len; /* length of data expected to send */ + u64 sgl_addr; /* scatter-gather list address */ +}; + +/* + * Command flags + */ +#define FCPIO_TDATA_SCSI_RSP 0x01 /* send a scsi resp. 
after last frame */ + +/* + * fcpio_txrdy: host -> firmware request + * + * used for requesting the firmware to send out a write data transfer for a + * target command + */ +struct fcpio_txrdy { + u16 rx_id; /* FC rx_id of target command */ + u16 _resvd0; /* reserved */ + u32 rel_offset; /* data sequence relative offset */ + u32 sgl_cnt; /* scatter-gather list count */ + u32 data_len; /* length of data expected to send */ + u64 sgl_addr; /* scatter-gather list address */ +}; + +/* + * fcpio_trsp: host -> firmware request + * + * used for requesting the firmware to send out a response for a target + * command + */ +struct fcpio_trsp { + u16 rx_id; /* FC rx_id of target command */ + u16 _resvd0; /* reserved */ + u32 sense_len; /* sense data buffer length */ + u64 sense_addr; /* sense data buffer address */ + u16 _resvd1; /* reserved */ + u8 flags; /* response request flags */ + u8 scsi_status; /* SCSI status */ + u32 residual; /* SCSI data residual value of I/O */ +}; + +/* + * resposnse request flags + */ +#define FCPIO_TRSP_RESID_UNDER 0x08 /* residual is valid and is underflow */ +#define FCPIO_TRSP_RESID_OVER 0x04 /* residual is valid and is overflow */ + +/* + * fcpio_ttmf_ack: host -> firmware response + * + * used by the host to indicate to the firmware it has received and processed + * the target tmf request + */ +struct fcpio_ttmf_ack { + u16 rx_id; /* FC rx_id of target command */ + u16 _resvd0; /* reserved */ + u32 tmf_status; /* SCSI task management status */ +}; + +/* + * fcpio_tabort: host -> firmware request + * + * used by the host to request the firmware to abort a target request that was + * received by the firmware + */ +struct fcpio_tabort { + u16 rx_id; /* rx_id of the target request */ +}; + +/* + * fcpio_reset: host -> firmware request + * + * used by the host to signal a reset of the driver to the firmware + * and to request firmware to clean up all outstanding I/O + */ +struct fcpio_reset { + u32 _resvd; +}; + +enum fcpio_flogi_reg_format_type { + FCPIO_FLOGI_REG_DEF_DEST = 0, /* Use the oui | s_id mac format */ + FCPIO_FLOGI_REG_GW_DEST, /* Use the fixed gateway mac */ +}; + +/* + * fcpio_flogi_reg: host -> firmware request + * + * fc vnic only + * used by the host to notify the firmware of the lif's s_id + * and destination mac address format + */ +struct fcpio_flogi_reg { + u8 format; + u8 s_id[3]; /* FC vNIC only: Source S_ID */ + u8 gateway_mac[ETH_ALEN]; /* Destination gateway mac */ + u16 _resvd; + u32 r_a_tov; /* R_A_TOV in msec */ + u32 e_d_tov; /* E_D_TOV in msec */ +}; + +/* + * fcpio_echo: host -> firmware request + * + * sends a heartbeat echo request to the firmware + */ +struct fcpio_echo { + u32 _resvd; +}; + +/* + * fcpio_lunmap_req: host -> firmware request + * + * scsi vnic only + * sends a request to retrieve the lunmap table for scsi vnics + */ +struct fcpio_lunmap_req { + u64 addr; /* address of the buffer */ + u32 len; /* len of the buffer */ +}; + +/* + * fcpio_flogi_fip_reg: host -> firmware request + * + * fc vnic only + * used by the host to notify the firmware of the lif's s_id + * and destination mac address format + */ +struct fcpio_flogi_fip_reg { + u8 _resvd0; + u8 s_id[3]; /* FC vNIC only: Source S_ID */ + u8 fcf_mac[ETH_ALEN]; /* FCF Target destination mac */ + u16 _resvd1; + u32 r_a_tov; /* R_A_TOV in msec */ + u32 e_d_tov; /* E_D_TOV in msec */ + u8 ha_mac[ETH_ALEN]; /* Host adapter source mac */ + u16 _resvd2; +}; + +/* + * Basic structure for all fcpio structures that are sent from the host to the + * firmware. 
They are 128 bytes per structure. + */ +#define FCPIO_HOST_REQ_LEN 128 /* expected length of host requests */ + +struct fcpio_host_req { + struct fcpio_header hdr; + + union { + /* + * Defines space needed for request + */ + u8 buf[FCPIO_HOST_REQ_LEN - sizeof(struct fcpio_header)]; + + /* + * Initiator host requests + */ + struct fcpio_icmnd_16 icmnd_16; + struct fcpio_icmnd_32 icmnd_32; + struct fcpio_itmf itmf; + + /* + * Target host requests + */ + struct fcpio_tdata tdata; + struct fcpio_txrdy txrdy; + struct fcpio_trsp trsp; + struct fcpio_ttmf_ack ttmf_ack; + struct fcpio_tabort tabort; + + /* + * Misc requests + */ + struct fcpio_reset reset; + struct fcpio_flogi_reg flogi_reg; + struct fcpio_echo echo; + struct fcpio_lunmap_req lunmap_req; + struct fcpio_flogi_fip_reg flogi_fip_reg; + } u; +}; + +/* + * fcpio_icmnd_cmpl: firmware -> host response + * + * used for sending the host a response to an initiator command + */ +struct fcpio_icmnd_cmpl { + u8 _resvd0[6]; /* reserved */ + u8 flags; /* response flags */ + u8 scsi_status; /* SCSI status */ + u32 residual; /* SCSI data residual length */ + u32 sense_len; /* SCSI sense length */ +}; + +/* + * response flags + */ +#define FCPIO_ICMND_CMPL_RESID_UNDER 0x08 /* resid under and valid */ +#define FCPIO_ICMND_CMPL_RESID_OVER 0x04 /* resid over and valid */ + +/* + * fcpio_itmf_cmpl: firmware -> host response + * + * used for sending the host a response for a itmf request + */ +struct fcpio_itmf_cmpl { + u32 _resvd; /* reserved */ +}; + +/* + * fcpio_tcmnd_16: firmware -> host request + * + * used by the firmware to notify the host of an incoming target SCSI 16-Byte + * request + */ +struct fcpio_tcmnd_16 { + u8 lun[LUN_ADDRESS]; /* FC vNIC only: LUN address */ + u8 crn; /* SCSI Command Reference No. */ + u8 pri_ta; /* SCSI Priority and Task attribute */ + u8 _resvd2; /* reserved: should be 0 */ + u8 flags; /* command flags */ + u8 scsi_cdb[CDB_16]; /* SCSI Cmnd Descriptor Block */ + u32 data_len; /* length of data expected */ + u8 _resvd1; /* reserved */ + u8 s_id[3]; /* FC vNIC only: Source S_ID */ +}; + +/* + * Priority/Task Attribute settings + */ +#define FCPIO_TCMND_PTA_SIMPLE 0 /* simple task attribute */ +#define FCPIO_TCMND_PTA_HEADQ 1 /* head of queue task attribute */ +#define FCPIO_TCMND_PTA_ORDERED 2 /* ordered task attribute */ +#define FCPIO_TCMND_PTA_ACA 4 /* auto contingent allegiance */ +#define FCPIO_TCMND_PRI_SHIFT 3 /* priority field starts in bit 3 */ + +/* + * Command flags + */ +#define FCPIO_TCMND_RDDATA 0x02 /* read data */ +#define FCPIO_TCMND_WRDATA 0x01 /* write data */ + +/* + * fcpio_tcmnd_32: firmware -> host request + * + * used by the firmware to notify the host of an incoming target SCSI 32-Byte + * request + */ +struct fcpio_tcmnd_32 { + u8 lun[LUN_ADDRESS]; /* FC vNIC only: LUN address */ + u8 crn; /* SCSI Command Reference No. 
*/ + u8 pri_ta; /* SCSI Priority and Task attribute */ + u8 _resvd2; /* reserved: should be 0 */ + u8 flags; /* command flags */ + u8 scsi_cdb[CDB_32]; /* SCSI Cmnd Descriptor Block */ + u32 data_len; /* length of data expected */ + u8 _resvd0; /* reserved */ + u8 s_id[3]; /* FC vNIC only: Source S_ID */ +}; + +/* + * fcpio_tdrsp_cmpl: firmware -> host response + * + * used by the firmware to notify the host of a response to a host target + * command + */ +struct fcpio_tdrsp_cmpl { + u16 rx_id; /* rx_id of the target request */ + u16 _resvd0; /* reserved */ +}; + +/* + * fcpio_ttmf: firmware -> host request + * + * used by the firmware to notify the host of an incoming task management + * function request + */ +struct fcpio_ttmf { + u8 _resvd0; /* reserved */ + u8 s_id[3]; /* FC vNIC only: Source S_ID */ + u8 lun[LUN_ADDRESS]; /* FC vNIC only: LUN address */ + u8 crn; /* SCSI Command Reference No. */ + u8 _resvd2[3]; /* reserved */ + u32 tmf_type; /* task management request type */ +}; + +/* + * Task Management request + */ +#define FCPIO_TTMF_CLR_ACA 0x40 /* Clear ACA condition */ +#define FCPIO_TTMF_LUN_RESET 0x10 /* logical unit reset task mgmt */ +#define FCPIO_TTMF_CLR_TASK_SET 0x04 /* clear task set */ +#define FCPIO_TTMF_ABT_TASK_SET 0x02 /* abort task set */ +#define FCPIO_TTMF_ABT_TASK 0x01 /* abort task */ + +/* + * fcpio_tabort_cmpl: firmware -> host response + * + * used by the firmware to respond to a host's tabort request + */ +struct fcpio_tabort_cmpl { + u16 rx_id; /* rx_id of the target request */ + u16 _resvd0; /* reserved */ +}; + +/* + * fcpio_ack: firmware -> host response + * + * used by firmware to notify the host of the last work request received + */ +struct fcpio_ack { + u16 request_out; /* last host entry received */ + u16 _resvd; +}; + +/* + * fcpio_reset_cmpl: firmware -> host response + * + * use by firmware to respond to the host's reset request + */ +struct fcpio_reset_cmpl { + u16 vnic_id; +}; + +/* + * fcpio_flogi_reg_cmpl: firmware -> host response + * + * fc vnic only + * response to the fcpio_flogi_reg request + */ +struct fcpio_flogi_reg_cmpl { + u32 _resvd; +}; + +/* + * fcpio_echo_cmpl: firmware -> host response + * + * response to the fcpio_echo request + */ +struct fcpio_echo_cmpl { + u32 _resvd; +}; + +/* + * fcpio_lunmap_chng: firmware -> host notification + * + * scsi vnic only + * notifies the host that the lunmap tables have changed + */ +struct fcpio_lunmap_chng { + u32 _resvd; +}; + +/* + * fcpio_lunmap_req_cmpl: firmware -> host response + * + * scsi vnic only + * response for lunmap table request from the host + */ +struct fcpio_lunmap_req_cmpl { + u32 _resvd; +}; + +/* + * Basic structure for all fcpio structures that are sent from the firmware to + * the host. They are 64 bytes per structure. 
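 *
 * Illustrative note: the top bit of the last byte of each 64-byte entry
 * is a color bit (see fcpio_color_dec() below), so a host poll loop can
 * detect a newly written entry roughly as:
 *
 *	fcpio_color_dec(fw_req, &color);
 *	if (color == last_seen_color)
 *		break;
 *
 * toggling last_seen_color each time the queue wraps.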
+ */ +#define FCPIO_FW_REQ_LEN 64 /* expected length of fw requests */ +struct fcpio_fw_req { + struct fcpio_header hdr; + + union { + /* + * Defines space needed for request + */ + u8 buf[FCPIO_FW_REQ_LEN - sizeof(struct fcpio_header)]; + + /* + * Initiator firmware responses + */ + struct fcpio_icmnd_cmpl icmnd_cmpl; + struct fcpio_itmf_cmpl itmf_cmpl; + + /* + * Target firmware new requests + */ + struct fcpio_tcmnd_16 tcmnd_16; + struct fcpio_tcmnd_32 tcmnd_32; + + /* + * Target firmware responses + */ + struct fcpio_tdrsp_cmpl tdrsp_cmpl; + struct fcpio_ttmf ttmf; + struct fcpio_tabort_cmpl tabort_cmpl; + + /* + * Firmware response to work received + */ + struct fcpio_ack ack; + + /* + * Misc requests + */ + struct fcpio_reset_cmpl reset_cmpl; + struct fcpio_flogi_reg_cmpl flogi_reg_cmpl; + struct fcpio_echo_cmpl echo_cmpl; + struct fcpio_lunmap_chng lunmap_chng; + struct fcpio_lunmap_req_cmpl lunmap_req_cmpl; + } u; +}; + +/* + * Access routines to encode and decode the color bit, which is the most + * significant bit of the MSB of the structure + */ +static inline void fcpio_color_enc(struct fcpio_fw_req *fw_req, u8 color) +{ + u8 *c = ((u8 *) fw_req) + sizeof(struct fcpio_fw_req) - 1; + + if (color) + *c |= 0x80; + else + *c &= ~0x80; +} + +static inline void fcpio_color_dec(struct fcpio_fw_req *fw_req, u8 *color) +{ + u8 *c = ((u8 *) fw_req) + sizeof(struct fcpio_fw_req) - 1; + + *color = *c >> 7; + + /* + * Make sure color bit is read from desc *before* other fields + * are read from desc. Hardware guarantees color bit is last + * bit (byte) written. Adding the rmb() prevents the compiler + * and/or CPU from reordering the reads which would potentially + * result in reading stale values. + */ + + rmb(); + +} + +/* + * Lunmap table entry for scsi vnics + */ +#define FCPIO_LUNMAP_TABLE_SIZE 256 +#define FCPIO_FLAGS_LUNMAP_VALID 0x80 +#define FCPIO_FLAGS_BOOT 0x01 +struct fcpio_lunmap_entry { + u8 bus; + u8 target; + u8 lun; + u8 path_cnt; + u16 flags; + u16 update_cnt; +}; + +struct fcpio_lunmap_tbl { + u32 update_cnt; + struct fcpio_lunmap_entry lunmaps[FCPIO_LUNMAP_TABLE_SIZE]; +}; + +#endif /* _FCPIO_H_ */ diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h new file mode 100644 index 00000000000..e4c0a3d7d87 --- /dev/null +++ b/drivers/scsi/fnic/fnic.h @@ -0,0 +1,265 @@ +/* + * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#ifndef _FNIC_H_ +#define _FNIC_H_ + +#include +#include +#include +#include +#include "fnic_io.h" +#include "fnic_res.h" +#include "vnic_dev.h" +#include "vnic_wq.h" +#include "vnic_rq.h" +#include "vnic_cq.h" +#include "vnic_wq_copy.h" +#include "vnic_intr.h" +#include "vnic_stats.h" +#include "vnic_scsi.h" + +#define DRV_NAME "fnic" +#define DRV_DESCRIPTION "Cisco FCoE HBA Driver" +#define DRV_VERSION "1.0.0.1121" +#define PFX DRV_NAME ": " +#define DFX DRV_NAME "%d: " + +#define DESC_CLEAN_LOW_WATERMARK 8 +#define FNIC_MAX_IO_REQ 2048 /* scsi_cmnd tag map entries */ +#define FNIC_IO_LOCKS 64 /* IO locks: power of 2 */ +#define FNIC_DFLT_QUEUE_DEPTH 32 +#define FNIC_STATS_RATE_LIMIT 4 /* limit rate at which stats are pulled up */ + +/* + * Tag bits used for special requests. + */ +#define BIT(nr) (1UL << (nr)) +#define FNIC_TAG_ABORT BIT(30) /* tag bit indicating abort */ +#define FNIC_TAG_DEV_RST BIT(29) /* indicates device reset */ +#define FNIC_TAG_MASK (BIT(24) - 1) /* mask for lookup */ +#define FNIC_NO_TAG -1 + +/* + * Usage of the scsi_cmnd scratchpad. + * These fields are locked by the hashed io_req_lock. + */ +#define CMD_SP(Cmnd) ((Cmnd)->SCp.ptr) +#define CMD_STATE(Cmnd) ((Cmnd)->SCp.phase) +#define CMD_ABTS_STATUS(Cmnd) ((Cmnd)->SCp.Message) +#define CMD_LR_STATUS(Cmnd) ((Cmnd)->SCp.have_data_in) +#define CMD_TAG(Cmnd) ((Cmnd)->SCp.sent_command) + +#define FCPIO_INVALID_CODE 0x100 /* hdr_status value unused by firmware */ + +#define FNIC_LUN_RESET_TIMEOUT 10000 /* mSec */ +#define FNIC_HOST_RESET_TIMEOUT 10000 /* mSec */ +#define FNIC_RMDEVICE_TIMEOUT 1000 /* mSec */ +#define FNIC_HOST_RESET_SETTLE_TIME 30 /* Sec */ + +#define FNIC_MAX_FCP_TARGET 256 + +extern unsigned int fnic_log_level; + +#define FNIC_MAIN_LOGGING 0x01 +#define FNIC_FCS_LOGGING 0x02 +#define FNIC_SCSI_LOGGING 0x04 +#define FNIC_ISR_LOGGING 0x08 + +#define FNIC_CHECK_LOGGING(LEVEL, CMD) \ +do { \ + if (unlikely(fnic_log_level & LEVEL)) \ + do { \ + CMD; \ + } while (0); \ +} while (0) + +#define FNIC_MAIN_DBG(kern_level, host, fmt, args...) \ + FNIC_CHECK_LOGGING(FNIC_MAIN_LOGGING, \ + shost_printk(kern_level, host, fmt, ##args);) + +#define FNIC_FCS_DBG(kern_level, host, fmt, args...) \ + FNIC_CHECK_LOGGING(FNIC_FCS_LOGGING, \ + shost_printk(kern_level, host, fmt, ##args);) + +#define FNIC_SCSI_DBG(kern_level, host, fmt, args...) \ + FNIC_CHECK_LOGGING(FNIC_SCSI_LOGGING, \ + shost_printk(kern_level, host, fmt, ##args);) + +#define FNIC_ISR_DBG(kern_level, host, fmt, args...) 
\ + FNIC_CHECK_LOGGING(FNIC_ISR_LOGGING, \ + shost_printk(kern_level, host, fmt, ##args);) + +extern const char *fnic_state_str[]; + +enum fnic_intx_intr_index { + FNIC_INTX_WQ_RQ_COPYWQ, + FNIC_INTX_ERR, + FNIC_INTX_NOTIFY, + FNIC_INTX_INTR_MAX, +}; + +enum fnic_msix_intr_index { + FNIC_MSIX_RQ, + FNIC_MSIX_WQ, + FNIC_MSIX_WQ_COPY, + FNIC_MSIX_ERR_NOTIFY, + FNIC_MSIX_INTR_MAX, +}; + +struct fnic_msix_entry { + int requested; + char devname[IFNAMSIZ]; + irqreturn_t (*isr)(int, void *); + void *devid; +}; + +enum fnic_state { + FNIC_IN_FC_MODE = 0, + FNIC_IN_FC_TRANS_ETH_MODE, + FNIC_IN_ETH_MODE, + FNIC_IN_ETH_TRANS_FC_MODE, +}; + +#define FNIC_WQ_COPY_MAX 1 +#define FNIC_WQ_MAX 1 +#define FNIC_RQ_MAX 1 +#define FNIC_CQ_MAX (FNIC_WQ_COPY_MAX + FNIC_WQ_MAX + FNIC_RQ_MAX) + +struct mempool; + +/* Per-instance private data structure */ +struct fnic { + struct fc_lport *lport; + struct vnic_dev_bar bar0; + + struct msix_entry msix_entry[FNIC_MSIX_INTR_MAX]; + struct fnic_msix_entry msix[FNIC_MSIX_INTR_MAX]; + + struct vnic_stats *stats; + unsigned long stats_time; /* time of stats update */ + struct vnic_nic_cfg *nic_cfg; + char name[IFNAMSIZ]; + struct timer_list notify_timer; /* used for MSI interrupts */ + + unsigned int err_intr_offset; + unsigned int link_intr_offset; + + unsigned int wq_count; + unsigned int cq_count; + + u32 fcoui_mode:1; /* use fcoui address*/ + u32 vlan_hw_insert:1; /* let hw insert the tag */ + u32 in_remove:1; /* fnic device in removal */ + u32 stop_rx_link_events:1; /* stop proc. rx frames, link events */ + + struct completion *remove_wait; /* device remove thread blocks */ + + struct fc_frame *flogi; + struct fc_frame *flogi_resp; + u16 flogi_oxid; + unsigned long s_id; + enum fnic_state state; + spinlock_t fnic_lock; + + u16 vlan_id; /* VLAN tag including priority */ + u8 mac_addr[ETH_ALEN]; + u8 dest_addr[ETH_ALEN]; + u8 data_src_addr[ETH_ALEN]; + u64 fcp_input_bytes; /* internal statistic */ + u64 fcp_output_bytes; /* internal statistic */ + u32 link_down_cnt; + int link_status; + + struct list_head list; + struct pci_dev *pdev; + struct vnic_fc_config config; + struct vnic_dev *vdev; + unsigned int raw_wq_count; + unsigned int wq_copy_count; + unsigned int rq_count; + int fw_ack_index[FNIC_WQ_COPY_MAX]; + unsigned short fw_ack_recd[FNIC_WQ_COPY_MAX]; + unsigned short wq_copy_desc_low[FNIC_WQ_COPY_MAX]; + unsigned int intr_count; + u32 __iomem *legacy_pba; + struct fnic_host_tag *tags; + mempool_t *io_req_pool; + mempool_t *io_sgl_pool[FNIC_SGL_NUM_CACHES]; + spinlock_t io_req_lock[FNIC_IO_LOCKS]; /* locks for scsi cmnds */ + + struct work_struct link_work; + struct work_struct frame_work; + struct sk_buff_head frame_queue; + + /* copy work queue cache line section */ + ____cacheline_aligned struct vnic_wq_copy wq_copy[FNIC_WQ_COPY_MAX]; + /* completion queue cache line section */ + ____cacheline_aligned struct vnic_cq cq[FNIC_CQ_MAX]; + + spinlock_t wq_copy_lock[FNIC_WQ_COPY_MAX]; + + /* work queue cache line section */ + ____cacheline_aligned struct vnic_wq wq[FNIC_WQ_MAX]; + spinlock_t wq_lock[FNIC_WQ_MAX]; + + /* receive queue cache line section */ + ____cacheline_aligned struct vnic_rq rq[FNIC_RQ_MAX]; + + /* interrupt resource cache line section */ + ____cacheline_aligned struct vnic_intr intr[FNIC_MSIX_INTR_MAX]; +}; + +extern struct workqueue_struct *fnic_event_queue; +extern struct device_attribute *fnic_attrs[]; + +void fnic_clear_intr_mode(struct fnic *fnic); +int fnic_set_intr_mode(struct fnic *fnic); +void fnic_free_intr(struct fnic *fnic); +int 
fnic_request_intr(struct fnic *fnic); + +int fnic_send(struct fc_lport *, struct fc_frame *); +void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf); +void fnic_handle_frame(struct work_struct *work); +void fnic_handle_link(struct work_struct *work); +int fnic_rq_cmpl_handler(struct fnic *fnic, int); +int fnic_alloc_rq_frame(struct vnic_rq *rq); +void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf); +int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp); + +int fnic_queuecommand(struct scsi_cmnd *, void (*done)(struct scsi_cmnd *)); +int fnic_abort_cmd(struct scsi_cmnd *); +int fnic_device_reset(struct scsi_cmnd *); +int fnic_host_reset(struct scsi_cmnd *); +int fnic_reset(struct Scsi_Host *); +void fnic_scsi_cleanup(struct fc_lport *); +void fnic_scsi_abort_io(struct fc_lport *); +void fnic_empty_scsi_cleanup(struct fc_lport *); +void fnic_exch_mgr_reset(struct fc_lport *, u32, u32); +int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int); +int fnic_wq_cmpl_handler(struct fnic *fnic, int); +int fnic_flogi_reg_handler(struct fnic *fnic); +void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq, + struct fcpio_host_req *desc); +int fnic_fw_reset_handler(struct fnic *fnic); +void fnic_terminate_rport_io(struct fc_rport *); +const char *fnic_state_to_str(unsigned int state); + +void fnic_log_q_error(struct fnic *fnic); +void fnic_handle_link_event(struct fnic *fnic); + +#endif /* _FNIC_H_ */ diff --git a/drivers/scsi/fnic/fnic_attrs.c b/drivers/scsi/fnic/fnic_attrs.c new file mode 100644 index 00000000000..aea0c3becfd --- /dev/null +++ b/drivers/scsi/fnic/fnic_attrs.c @@ -0,0 +1,56 @@ +/* + * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#include +#include +#include +#include "fnic.h" + +static ssize_t fnic_show_state(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct fc_lport *lp = shost_priv(class_to_shost(dev)); + struct fnic *fnic = lport_priv(lp); + + return snprintf(buf, PAGE_SIZE, "%s\n", fnic_state_str[fnic->state]); +} + +static ssize_t fnic_show_drv_version(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%s\n", DRV_VERSION); +} + +static ssize_t fnic_show_link_state(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct fc_lport *lp = shost_priv(class_to_shost(dev)); + + return snprintf(buf, PAGE_SIZE, "%s\n", (lp->link_up) + ? 
"Link Up" : "Link Down"); +} + +static DEVICE_ATTR(fnic_state, S_IRUGO, fnic_show_state, NULL); +static DEVICE_ATTR(drv_version, S_IRUGO, fnic_show_drv_version, NULL); +static DEVICE_ATTR(link_state, S_IRUGO, fnic_show_link_state, NULL); + +struct device_attribute *fnic_attrs[] = { + &dev_attr_fnic_state, + &dev_attr_drv_version, + &dev_attr_link_state, + NULL, +}; diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c new file mode 100644 index 00000000000..07e6eedb83c --- /dev/null +++ b/drivers/scsi/fnic/fnic_fcs.c @@ -0,0 +1,742 @@ +/* + * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "fnic_io.h" +#include "fnic.h" +#include "cq_enet_desc.h" +#include "cq_exch_desc.h" + +struct workqueue_struct *fnic_event_queue; + +void fnic_handle_link(struct work_struct *work) +{ + struct fnic *fnic = container_of(work, struct fnic, link_work); + unsigned long flags; + int old_link_status; + u32 old_link_down_cnt; + + spin_lock_irqsave(&fnic->fnic_lock, flags); + + if (fnic->stop_rx_link_events) { + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + return; + } + + old_link_down_cnt = fnic->link_down_cnt; + old_link_status = fnic->link_status; + fnic->link_status = vnic_dev_link_status(fnic->vdev); + fnic->link_down_cnt = vnic_dev_link_down_cnt(fnic->vdev); + + if (old_link_status == fnic->link_status) { + if (!fnic->link_status) + /* DOWN -> DOWN */ + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + else { + if (old_link_down_cnt != fnic->link_down_cnt) { + /* UP -> DOWN -> UP */ + fnic->lport->host_stats.link_failure_count++; + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, + "link down\n"); + fc_linkdown(fnic->lport); + FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, + "link up\n"); + fc_linkup(fnic->lport); + } else + /* UP -> UP */ + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + } + } else if (fnic->link_status) { + /* DOWN -> UP */ + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n"); + fc_linkup(fnic->lport); + } else { + /* UP -> DOWN */ + fnic->lport->host_stats.link_failure_count++; + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link down\n"); + fc_linkdown(fnic->lport); + } + +} + +/* + * This function passes incoming fabric frames to libFC + */ +void fnic_handle_frame(struct work_struct *work) +{ + struct fnic *fnic = container_of(work, struct fnic, frame_work); + struct fc_lport *lp = fnic->lport; + unsigned long flags; + struct sk_buff *skb; + struct fc_frame *fp; + + while ((skb = skb_dequeue(&fnic->frame_queue))) { 
+ + spin_lock_irqsave(&fnic->fnic_lock, flags); + if (fnic->stop_rx_link_events) { + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + dev_kfree_skb(skb); + return; + } + fp = (struct fc_frame *)skb; + /* if Flogi resp frame, register the address */ + if (fr_flags(fp)) { + vnic_dev_add_addr(fnic->vdev, + fnic->data_src_addr); + fr_flags(fp) = 0; + } + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + fc_exch_recv(lp, lp->emp, fp); + } + +} + +static inline void fnic_import_rq_fc_frame(struct sk_buff *skb, + u32 len, u8 sof, u8 eof) +{ + struct fc_frame *fp = (struct fc_frame *)skb; + + skb_trim(skb, len); + fr_eof(fp) = eof; + fr_sof(fp) = sof; +} + + +static inline int fnic_import_rq_eth_pkt(struct sk_buff *skb, u32 len) +{ + struct fc_frame *fp; + struct ethhdr *eh; + struct vlan_ethhdr *vh; + struct fcoe_hdr *fcoe_hdr; + struct fcoe_crc_eof *ft; + u32 transport_len = 0; + + eh = (struct ethhdr *)skb->data; + vh = (struct vlan_ethhdr *)skb->data; + if (vh->h_vlan_proto == htons(ETH_P_8021Q) && + vh->h_vlan_encapsulated_proto == htons(ETH_P_FCOE)) { + skb_pull(skb, sizeof(struct vlan_ethhdr)); + transport_len += sizeof(struct vlan_ethhdr); + } else if (eh->h_proto == htons(ETH_P_FCOE)) { + transport_len += sizeof(struct ethhdr); + skb_pull(skb, sizeof(struct ethhdr)); + } else + return -1; + + fcoe_hdr = (struct fcoe_hdr *)skb->data; + if (FC_FCOE_DECAPS_VER(fcoe_hdr) != FC_FCOE_VER) + return -1; + + fp = (struct fc_frame *)skb; + fc_frame_init(fp); + fr_sof(fp) = fcoe_hdr->fcoe_sof; + skb_pull(skb, sizeof(struct fcoe_hdr)); + transport_len += sizeof(struct fcoe_hdr); + + ft = (struct fcoe_crc_eof *)(skb->data + len - + transport_len - sizeof(*ft)); + fr_eof(fp) = ft->fcoe_eof; + skb_trim(skb, len - transport_len - sizeof(*ft)); + return 0; +} + +static inline int fnic_handle_flogi_resp(struct fnic *fnic, + struct fc_frame *fp) +{ + u8 mac[ETH_ALEN] = FC_FCOE_FLOGI_MAC; + struct ethhdr *eth_hdr; + struct fc_frame_header *fh; + int ret = 0; + unsigned long flags; + struct fc_frame *old_flogi_resp = NULL; + + fh = (struct fc_frame_header *)fr_hdr(fp); + + spin_lock_irqsave(&fnic->fnic_lock, flags); + + if (fnic->state == FNIC_IN_ETH_MODE) { + + /* + * Check if oxid matches on taking the lock. A new Flogi + * issued by libFC might have changed the fnic cached oxid + */ + if (fnic->flogi_oxid != ntohs(fh->fh_ox_id)) { + FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, + "Flogi response oxid not" + " matching cached oxid, dropping frame" + "\n"); + ret = -1; + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + dev_kfree_skb_irq(fp_skb(fp)); + goto handle_flogi_resp_end; + } + + /* Drop older cached flogi response frame, cache this frame */ + old_flogi_resp = fnic->flogi_resp; + fnic->flogi_resp = fp; + fnic->flogi_oxid = FC_XID_UNKNOWN; + + /* + * this frame is part of flogi get the src mac addr from this + * frame if the src mac is fcoui based then we mark the + * address mode flag to use fcoui base for dst mac addr + * otherwise we have to store the fcoe gateway addr + */ + eth_hdr = (struct ethhdr *)skb_mac_header(fp_skb(fp)); + memcpy(mac, eth_hdr->h_source, ETH_ALEN); + + if (ntoh24(mac) == FC_FCOE_OUI) + fnic->fcoui_mode = 1; + else { + fnic->fcoui_mode = 0; + memcpy(fnic->dest_addr, mac, ETH_ALEN); + } + + /* + * Except for Flogi frame, all outbound frames from us have the + * Eth Src address as FC_FCOE_OUI"our_sid". 
Flogi frame uses + * the vnic MAC address as the Eth Src address + */ + fc_fcoe_set_mac(fnic->data_src_addr, fh->fh_d_id); + + /* We get our s_id from the d_id of the flogi resp frame */ + fnic->s_id = ntoh24(fh->fh_d_id); + + /* Change state to reflect transition from Eth to FC mode */ + fnic->state = FNIC_IN_ETH_TRANS_FC_MODE; + + } else { + FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, + "Unexpected fnic state %s while" + " processing flogi resp\n", + fnic_state_to_str(fnic->state)); + ret = -1; + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + dev_kfree_skb_irq(fp_skb(fp)); + goto handle_flogi_resp_end; + } + + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + /* Drop older cached frame */ + if (old_flogi_resp) + dev_kfree_skb_irq(fp_skb(old_flogi_resp)); + + /* + * send flogi reg request to firmware, this will put the fnic in + * in FC mode + */ + ret = fnic_flogi_reg_handler(fnic); + + if (ret < 0) { + int free_fp = 1; + spin_lock_irqsave(&fnic->fnic_lock, flags); + /* + * free the frame is some other thread is not + * pointing to it + */ + if (fnic->flogi_resp != fp) + free_fp = 0; + else + fnic->flogi_resp = NULL; + + if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE) + fnic->state = FNIC_IN_ETH_MODE; + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + if (free_fp) + dev_kfree_skb_irq(fp_skb(fp)); + } + + handle_flogi_resp_end: + return ret; +} + +/* Returns 1 for a response that matches cached flogi oxid */ +static inline int is_matching_flogi_resp_frame(struct fnic *fnic, + struct fc_frame *fp) +{ + struct fc_frame_header *fh; + int ret = 0; + u32 f_ctl; + + fh = fc_frame_header_get(fp); + f_ctl = ntoh24(fh->fh_f_ctl); + + if (fnic->flogi_oxid == ntohs(fh->fh_ox_id) && + fh->fh_r_ctl == FC_RCTL_ELS_REP && + (f_ctl & (FC_FC_EX_CTX | FC_FC_SEQ_CTX)) == FC_FC_EX_CTX && + fh->fh_type == FC_TYPE_ELS) + ret = 1; + + return ret; +} + +static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc + *cq_desc, struct vnic_rq_buf *buf, + int skipped __attribute__((unused)), + void *opaque) +{ + struct fnic *fnic = vnic_dev_priv(rq->vdev); + struct sk_buff *skb; + struct fc_frame *fp; + unsigned int eth_hdrs_stripped; + u8 type, color, eop, sop, ingress_port, vlan_stripped; + u8 fcoe = 0, fcoe_sof, fcoe_eof; + u8 fcoe_fc_crc_ok = 1, fcoe_enc_error = 0; + u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok; + u8 ipv6, ipv4, ipv4_fragment, rss_type, csum_not_calc; + u8 fcs_ok = 1, packet_error = 0; + u16 q_number, completed_index, bytes_written = 0, vlan, checksum; + u32 rss_hash; + u16 exchange_id, tmpl; + u8 sof = 0; + u8 eof = 0; + u32 fcp_bytes_written = 0; + unsigned long flags; + + pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len, + PCI_DMA_FROMDEVICE); + skb = buf->os_buf; + buf->os_buf = NULL; + + cq_desc_dec(cq_desc, &type, &color, &q_number, &completed_index); + if (type == CQ_DESC_TYPE_RQ_FCP) { + cq_fcp_rq_desc_dec((struct cq_fcp_rq_desc *)cq_desc, + &type, &color, &q_number, &completed_index, + &eop, &sop, &fcoe_fc_crc_ok, &exchange_id, + &tmpl, &fcp_bytes_written, &sof, &eof, + &ingress_port, &packet_error, + &fcoe_enc_error, &fcs_ok, &vlan_stripped, + &vlan); + eth_hdrs_stripped = 1; + + } else if (type == CQ_DESC_TYPE_RQ_ENET) { + cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc, + &type, &color, &q_number, &completed_index, + &ingress_port, &fcoe, &eop, &sop, + &rss_type, &csum_not_calc, &rss_hash, + &bytes_written, &packet_error, + &vlan_stripped, &vlan, &checksum, + &fcoe_sof, &fcoe_fc_crc_ok, + &fcoe_enc_error, &fcoe_eof, + &tcp_udp_csum_ok, &udp, &tcp, + 
&ipv4_csum_ok, &ipv6, &ipv4, + &ipv4_fragment, &fcs_ok); + eth_hdrs_stripped = 0; + + } else { + /* wrong CQ type*/ + shost_printk(KERN_ERR, fnic->lport->host, + "fnic rq_cmpl wrong cq type x%x\n", type); + goto drop; + } + + if (!fcs_ok || packet_error || !fcoe_fc_crc_ok || fcoe_enc_error) { + FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, + "fnic rq_cmpl fcoe x%x fcsok x%x" + " pkterr x%x fcoe_fc_crc_ok x%x, fcoe_enc_err" + " x%x\n", + fcoe, fcs_ok, packet_error, + fcoe_fc_crc_ok, fcoe_enc_error); + goto drop; + } + + if (eth_hdrs_stripped) + fnic_import_rq_fc_frame(skb, fcp_bytes_written, sof, eof); + else if (fnic_import_rq_eth_pkt(skb, bytes_written)) + goto drop; + + fp = (struct fc_frame *)skb; + + /* + * If frame is an ELS response that matches the cached FLOGI OX_ID, + * and is accept, issue flogi_reg_request copy wq request to firmware + * to register the S_ID and determine whether FC_OUI mode or GW mode. + */ + if (is_matching_flogi_resp_frame(fnic, fp)) { + if (!eth_hdrs_stripped) { + if (fc_frame_payload_op(fp) == ELS_LS_ACC) { + fnic_handle_flogi_resp(fnic, fp); + return; + } + /* + * Recd. Flogi reject. No point registering + * with fw, but forward to libFC + */ + goto forward; + } + goto drop; + } + if (!eth_hdrs_stripped) + goto drop; + +forward: + spin_lock_irqsave(&fnic->fnic_lock, flags); + if (fnic->stop_rx_link_events) { + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + goto drop; + } + /* Use fr_flags to indicate whether succ. flogi resp or not */ + fr_flags(fp) = 0; + fr_dev(fp) = fnic->lport; + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + skb_queue_tail(&fnic->frame_queue, skb); + queue_work(fnic_event_queue, &fnic->frame_work); + + return; +drop: + dev_kfree_skb_irq(skb); +} + +static int fnic_rq_cmpl_handler_cont(struct vnic_dev *vdev, + struct cq_desc *cq_desc, u8 type, + u16 q_number, u16 completed_index, + void *opaque) +{ + struct fnic *fnic = vnic_dev_priv(vdev); + + vnic_rq_service(&fnic->rq[q_number], cq_desc, completed_index, + VNIC_RQ_RETURN_DESC, fnic_rq_cmpl_frame_recv, + NULL); + return 0; +} + +int fnic_rq_cmpl_handler(struct fnic *fnic, int rq_work_to_do) +{ + unsigned int tot_rq_work_done = 0, cur_work_done; + unsigned int i; + int err; + + for (i = 0; i < fnic->rq_count; i++) { + cur_work_done = vnic_cq_service(&fnic->cq[i], rq_work_to_do, + fnic_rq_cmpl_handler_cont, + NULL); + if (cur_work_done) { + err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame); + if (err) + shost_printk(KERN_ERR, fnic->lport->host, + "fnic_alloc_rq_frame cant alloc" + " frame\n"); + } + tot_rq_work_done += cur_work_done; + } + + return tot_rq_work_done; +} + +/* + * This function is called once at init time to allocate and fill RQ + * buffers. 
Subsequently, it is called in the interrupt context after RQ + * buffer processing to replenish the buffers in the RQ + */ +int fnic_alloc_rq_frame(struct vnic_rq *rq) +{ + struct fnic *fnic = vnic_dev_priv(rq->vdev); + struct sk_buff *skb; + u16 len; + dma_addr_t pa; + + len = FC_FRAME_HEADROOM + FC_MAX_FRAME + FC_FRAME_TAILROOM; + skb = dev_alloc_skb(len); + if (!skb) { + FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, + "Unable to allocate RQ sk_buff\n"); + return -ENOMEM; + } + skb_reset_mac_header(skb); + skb_reset_transport_header(skb); + skb_reset_network_header(skb); + skb_put(skb, len); + pa = pci_map_single(fnic->pdev, skb->data, len, PCI_DMA_FROMDEVICE); + fnic_queue_rq_desc(rq, skb, pa, len); + return 0; +} + +void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf) +{ + struct fc_frame *fp = buf->os_buf; + struct fnic *fnic = vnic_dev_priv(rq->vdev); + + pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len, + PCI_DMA_FROMDEVICE); + + dev_kfree_skb(fp_skb(fp)); + buf->os_buf = NULL; +} + +static inline int is_flogi_frame(struct fc_frame_header *fh) +{ + return fh->fh_r_ctl == FC_RCTL_ELS_REQ && *(u8 *)(fh + 1) == ELS_FLOGI; +} + +int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp) +{ + struct vnic_wq *wq = &fnic->wq[0]; + struct sk_buff *skb; + dma_addr_t pa; + struct ethhdr *eth_hdr; + struct vlan_ethhdr *vlan_hdr; + struct fcoe_hdr *fcoe_hdr; + struct fc_frame_header *fh; + u32 tot_len, eth_hdr_len; + int ret = 0; + unsigned long flags; + + fh = fc_frame_header_get(fp); + skb = fp_skb(fp); + + if (!fnic->vlan_hw_insert) { + eth_hdr_len = sizeof(*vlan_hdr) + sizeof(*fcoe_hdr); + vlan_hdr = (struct vlan_ethhdr *)skb_push(skb, eth_hdr_len); + eth_hdr = (struct ethhdr *)vlan_hdr; + vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q); + vlan_hdr->h_vlan_encapsulated_proto = htons(ETH_P_FCOE); + vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id); + fcoe_hdr = (struct fcoe_hdr *)(vlan_hdr + 1); + } else { + eth_hdr_len = sizeof(*eth_hdr) + sizeof(*fcoe_hdr); + eth_hdr = (struct ethhdr *)skb_push(skb, eth_hdr_len); + eth_hdr->h_proto = htons(ETH_P_FCOE); + fcoe_hdr = (struct fcoe_hdr *)(eth_hdr + 1); + } + + if (is_flogi_frame(fh)) { + fc_fcoe_set_mac(eth_hdr->h_dest, fh->fh_d_id); + memcpy(eth_hdr->h_source, fnic->mac_addr, ETH_ALEN); + } else { + if (fnic->fcoui_mode) + fc_fcoe_set_mac(eth_hdr->h_dest, fh->fh_d_id); + else + memcpy(eth_hdr->h_dest, fnic->dest_addr, ETH_ALEN); + memcpy(eth_hdr->h_source, fnic->data_src_addr, ETH_ALEN); + } + + tot_len = skb->len; + BUG_ON(tot_len % 4); + + memset(fcoe_hdr, 0, sizeof(*fcoe_hdr)); + fcoe_hdr->fcoe_sof = fr_sof(fp); + if (FC_FCOE_VER) + FC_FCOE_ENCAPS_VER(fcoe_hdr, FC_FCOE_VER); + + pa = pci_map_single(fnic->pdev, eth_hdr, tot_len, PCI_DMA_TODEVICE); + + spin_lock_irqsave(&fnic->wq_lock[0], flags); + + if (!vnic_wq_desc_avail(wq)) { + pci_unmap_single(fnic->pdev, pa, + tot_len, PCI_DMA_TODEVICE); + ret = -1; + goto fnic_send_frame_end; + } + + fnic_queue_wq_desc(wq, skb, pa, tot_len, fr_eof(fp), + fnic->vlan_hw_insert, fnic->vlan_id, 1, 1, 1); +fnic_send_frame_end: + spin_unlock_irqrestore(&fnic->wq_lock[0], flags); + + if (ret) + dev_kfree_skb_any(fp_skb(fp)); + + return ret; +} + +/* + * fnic_send + * Routine to send a raw frame + */ +int fnic_send(struct fc_lport *lp, struct fc_frame *fp) +{ + struct fnic *fnic = lport_priv(lp); + struct fc_frame_header *fh; + int ret = 0; + enum fnic_state old_state; + unsigned long flags; + struct fc_frame *old_flogi = NULL; + struct fc_frame *old_flogi_resp = NULL; + + if (fnic->in_remove) { + 
dev_kfree_skb(fp_skb(fp)); + ret = -1; + goto fnic_send_end; + } + + fh = fc_frame_header_get(fp); + /* if not an Flogi frame, send it out, this is the common case */ + if (!is_flogi_frame(fh)) + return fnic_send_frame(fnic, fp); + + /* Flogi frame, now enter the state machine */ + + spin_lock_irqsave(&fnic->fnic_lock, flags); +again: + /* Get any old cached frames, free them after dropping lock */ + old_flogi = fnic->flogi; + fnic->flogi = NULL; + old_flogi_resp = fnic->flogi_resp; + fnic->flogi_resp = NULL; + + fnic->flogi_oxid = FC_XID_UNKNOWN; + + old_state = fnic->state; + switch (old_state) { + case FNIC_IN_FC_MODE: + case FNIC_IN_ETH_TRANS_FC_MODE: + default: + fnic->state = FNIC_IN_FC_TRANS_ETH_MODE; + vnic_dev_del_addr(fnic->vdev, fnic->data_src_addr); + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + if (old_flogi) { + dev_kfree_skb(fp_skb(old_flogi)); + old_flogi = NULL; + } + if (old_flogi_resp) { + dev_kfree_skb(fp_skb(old_flogi_resp)); + old_flogi_resp = NULL; + } + + ret = fnic_fw_reset_handler(fnic); + + spin_lock_irqsave(&fnic->fnic_lock, flags); + if (fnic->state != FNIC_IN_FC_TRANS_ETH_MODE) + goto again; + if (ret) { + fnic->state = old_state; + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + dev_kfree_skb(fp_skb(fp)); + goto fnic_send_end; + } + old_flogi = fnic->flogi; + fnic->flogi = fp; + fnic->flogi_oxid = ntohs(fh->fh_ox_id); + old_flogi_resp = fnic->flogi_resp; + fnic->flogi_resp = NULL; + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + break; + + case FNIC_IN_FC_TRANS_ETH_MODE: + /* + * A reset is pending with the firmware. Store the flogi + * and its oxid. The transition out of this state happens + * only when Firmware completes the reset, either with + * success or failed. If success, transition to + * FNIC_IN_ETH_MODE, if fail, then transition to + * FNIC_IN_FC_MODE + */ + fnic->flogi = fp; + fnic->flogi_oxid = ntohs(fh->fh_ox_id); + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + break; + + case FNIC_IN_ETH_MODE: + /* + * The fw/hw is already in eth mode. Store the oxid, + * and send the flogi frame out. The transition out of this + * state happens only we receive flogi response from the + * network, and the oxid matches the cached oxid when the + * flogi frame was sent out. 
If they match, then we issue + * a flogi_reg request and transition to state + * FNIC_IN_ETH_TRANS_FC_MODE + */ + fnic->flogi_oxid = ntohs(fh->fh_ox_id); + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + ret = fnic_send_frame(fnic, fp); + break; + } + +fnic_send_end: + if (old_flogi) + dev_kfree_skb(fp_skb(old_flogi)); + if (old_flogi_resp) + dev_kfree_skb(fp_skb(old_flogi_resp)); + return ret; +} + +static void fnic_wq_complete_frame_send(struct vnic_wq *wq, + struct cq_desc *cq_desc, + struct vnic_wq_buf *buf, void *opaque) +{ + struct sk_buff *skb = buf->os_buf; + struct fc_frame *fp = (struct fc_frame *)skb; + struct fnic *fnic = vnic_dev_priv(wq->vdev); + + pci_unmap_single(fnic->pdev, buf->dma_addr, + buf->len, PCI_DMA_TODEVICE); + dev_kfree_skb_irq(fp_skb(fp)); + buf->os_buf = NULL; +} + +static int fnic_wq_cmpl_handler_cont(struct vnic_dev *vdev, + struct cq_desc *cq_desc, u8 type, + u16 q_number, u16 completed_index, + void *opaque) +{ + struct fnic *fnic = vnic_dev_priv(vdev); + unsigned long flags; + + spin_lock_irqsave(&fnic->wq_lock[q_number], flags); + vnic_wq_service(&fnic->wq[q_number], cq_desc, completed_index, + fnic_wq_complete_frame_send, NULL); + spin_unlock_irqrestore(&fnic->wq_lock[q_number], flags); + + return 0; +} + +int fnic_wq_cmpl_handler(struct fnic *fnic, int work_to_do) +{ + unsigned int wq_work_done = 0; + unsigned int i; + + for (i = 0; i < fnic->raw_wq_count; i++) { + wq_work_done += vnic_cq_service(&fnic->cq[fnic->rq_count+i], + work_to_do, + fnic_wq_cmpl_handler_cont, + NULL); + } + + return wq_work_done; +} + + +void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf) +{ + struct fc_frame *fp = buf->os_buf; + struct fnic *fnic = vnic_dev_priv(wq->vdev); + + pci_unmap_single(fnic->pdev, buf->dma_addr, + buf->len, PCI_DMA_TODEVICE); + + dev_kfree_skb(fp_skb(fp)); + buf->os_buf = NULL; +} diff --git a/drivers/scsi/fnic/fnic_io.h b/drivers/scsi/fnic/fnic_io.h new file mode 100644 index 00000000000..f0b896988cd --- /dev/null +++ b/drivers/scsi/fnic/fnic_io.h @@ -0,0 +1,67 @@ +/* + * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#ifndef _FNIC_IO_H_ +#define _FNIC_IO_H_ + +#include + +#define FNIC_DFLT_SG_DESC_CNT 32 +#define FNIC_MAX_SG_DESC_CNT 1024 /* Maximum descriptors per sgl */ +#define FNIC_SG_DESC_ALIGN 16 /* Descriptor address alignment */ + +struct host_sg_desc { + __le64 addr; + __le32 len; + u32 _resvd; +}; + +struct fnic_dflt_sgl_list { + struct host_sg_desc sg_desc[FNIC_DFLT_SG_DESC_CNT]; +}; + +struct fnic_sgl_list { + struct host_sg_desc sg_desc[FNIC_MAX_SG_DESC_CNT]; +}; + +enum fnic_sgl_list_type { + FNIC_SGL_CACHE_DFLT = 0, /* cache with default size sgl */ + FNIC_SGL_CACHE_MAX, /* cache with max size sgl */ + FNIC_SGL_NUM_CACHES /* number of sgl caches */ +}; + +enum fnic_ioreq_state { + FNIC_IOREQ_CMD_PENDING = 0, + FNIC_IOREQ_ABTS_PENDING, + FNIC_IOREQ_ABTS_COMPLETE, + FNIC_IOREQ_CMD_COMPLETE, +}; + +struct fnic_io_req { + struct host_sg_desc *sgl_list; /* sgl list */ + void *sgl_list_alloc; /* sgl list address used for free */ + dma_addr_t sense_buf_pa; /* dma address for sense buffer*/ + dma_addr_t sgl_list_pa; /* dma address for sgl list */ + u16 sgl_cnt; + u8 sgl_type; /* device DMA descriptor list type */ + u8 io_completed:1; /* set to 1 when fw completes IO */ + u32 port_id; /* remote port DID */ + struct completion *abts_done; /* completion for abts */ + struct completion *dr_done; /* completion for device reset */ +}; + +#endif /* _FNIC_IO_H_ */ diff --git a/drivers/scsi/fnic/fnic_isr.c b/drivers/scsi/fnic/fnic_isr.c new file mode 100644 index 00000000000..2b3064828ae --- /dev/null +++ b/drivers/scsi/fnic/fnic_isr.c @@ -0,0 +1,332 @@ +/* + * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
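
fnic_io.h above declares host_sg_desc with __le64/__le32 fields, i.e. the adapter consumes the scatter-gather list in little-endian byte order no matter what the host CPU uses. A small portable sketch of filling such a descriptor (the struct and put_le helpers are simplified stand-ins; the kernel would use cpu_to_le64()/cpu_to_le32() on the typed fields instead):

    #include <stdint.h>
    #include <stdio.h>

    /* Byte-array mirror of host_sg_desc: 8-byte address, 4-byte length,
     * 4 reserved bytes, 16 bytes total. */
    struct sg_desc_demo {
        uint8_t addr[8];    /* __le64 addr in the driver */
        uint8_t len[4];     /* __le32 len                */
        uint8_t resvd[4];
    };

    static void put_le64(uint8_t *p, uint64_t v)
    {
        for (int i = 0; i < 8; i++)
            p[i] = (uint8_t)(v >> (8 * i));
    }

    static void put_le32(uint8_t *p, uint32_t v)
    {
        for (int i = 0; i < 4; i++)
            p[i] = (uint8_t)(v >> (8 * i));
    }

    static void fill_sg_demo(struct sg_desc_demo *sg, uint64_t dma_addr,
                             uint32_t byte_len)
    {
        put_le64(sg->addr, dma_addr);
        put_le32(sg->len, byte_len);
        put_le32(sg->resvd, 0);       /* reserved field stays zero */
    }

    int main(void)
    {
        struct sg_desc_demo sg;

        fill_sg_demo(&sg, 0x12345678ULL, 4096);
        printf("descriptor size = %zu bytes\n", sizeof(sg));   /* 16 */
        printf("addr[0..1] = %02x %02x (least significant byte first)\n",
               sg.addr[0], sg.addr[1]);
        return 0;
    }
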
+ */ +#include +#include +#include +#include +#include +#include +#include "vnic_dev.h" +#include "vnic_intr.h" +#include "vnic_stats.h" +#include "fnic_io.h" +#include "fnic.h" + +static irqreturn_t fnic_isr_legacy(int irq, void *data) +{ + struct fnic *fnic = data; + u32 pba; + unsigned long work_done = 0; + + pba = vnic_intr_legacy_pba(fnic->legacy_pba); + if (!pba) + return IRQ_NONE; + + if (pba & (1 << FNIC_INTX_NOTIFY)) { + vnic_intr_return_all_credits(&fnic->intr[FNIC_INTX_NOTIFY]); + fnic_handle_link_event(fnic); + } + + if (pba & (1 << FNIC_INTX_ERR)) { + vnic_intr_return_all_credits(&fnic->intr[FNIC_INTX_ERR]); + fnic_log_q_error(fnic); + } + + if (pba & (1 << FNIC_INTX_WQ_RQ_COPYWQ)) { + work_done += fnic_wq_copy_cmpl_handler(fnic, 8); + work_done += fnic_wq_cmpl_handler(fnic, 4); + work_done += fnic_rq_cmpl_handler(fnic, 4); + + vnic_intr_return_credits(&fnic->intr[FNIC_INTX_WQ_RQ_COPYWQ], + work_done, + 1 /* unmask intr */, + 1 /* reset intr timer */); + } + + return IRQ_HANDLED; +} + +static irqreturn_t fnic_isr_msi(int irq, void *data) +{ + struct fnic *fnic = data; + unsigned long work_done = 0; + + work_done += fnic_wq_copy_cmpl_handler(fnic, 8); + work_done += fnic_wq_cmpl_handler(fnic, 4); + work_done += fnic_rq_cmpl_handler(fnic, 4); + + vnic_intr_return_credits(&fnic->intr[0], + work_done, + 1 /* unmask intr */, + 1 /* reset intr timer */); + + return IRQ_HANDLED; +} + +static irqreturn_t fnic_isr_msix_rq(int irq, void *data) +{ + struct fnic *fnic = data; + unsigned long rq_work_done = 0; + + rq_work_done = fnic_rq_cmpl_handler(fnic, 4); + vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_RQ], + rq_work_done, + 1 /* unmask intr */, + 1 /* reset intr timer */); + + return IRQ_HANDLED; +} + +static irqreturn_t fnic_isr_msix_wq(int irq, void *data) +{ + struct fnic *fnic = data; + unsigned long wq_work_done = 0; + + wq_work_done = fnic_wq_cmpl_handler(fnic, 4); + vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_WQ], + wq_work_done, + 1 /* unmask intr */, + 1 /* reset intr timer */); + return IRQ_HANDLED; +} + +static irqreturn_t fnic_isr_msix_wq_copy(int irq, void *data) +{ + struct fnic *fnic = data; + unsigned long wq_copy_work_done = 0; + + wq_copy_work_done = fnic_wq_copy_cmpl_handler(fnic, 8); + vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_WQ_COPY], + wq_copy_work_done, + 1 /* unmask intr */, + 1 /* reset intr timer */); + return IRQ_HANDLED; +} + +static irqreturn_t fnic_isr_msix_err_notify(int irq, void *data) +{ + struct fnic *fnic = data; + + vnic_intr_return_all_credits(&fnic->intr[FNIC_MSIX_ERR_NOTIFY]); + fnic_log_q_error(fnic); + fnic_handle_link_event(fnic); + + return IRQ_HANDLED; +} + +void fnic_free_intr(struct fnic *fnic) +{ + int i; + + switch (vnic_dev_get_intr_mode(fnic->vdev)) { + case VNIC_DEV_INTR_MODE_INTX: + case VNIC_DEV_INTR_MODE_MSI: + free_irq(fnic->pdev->irq, fnic); + break; + + case VNIC_DEV_INTR_MODE_MSIX: + for (i = 0; i < ARRAY_SIZE(fnic->msix); i++) + if (fnic->msix[i].requested) + free_irq(fnic->msix_entry[i].vector, + fnic->msix[i].devid); + break; + + default: + break; + } +} + +int fnic_request_intr(struct fnic *fnic) +{ + int err = 0; + int i; + + switch (vnic_dev_get_intr_mode(fnic->vdev)) { + + case VNIC_DEV_INTR_MODE_INTX: + err = request_irq(fnic->pdev->irq, &fnic_isr_legacy, + IRQF_SHARED, DRV_NAME, fnic); + break; + + case VNIC_DEV_INTR_MODE_MSI: + err = request_irq(fnic->pdev->irq, &fnic_isr_msi, + 0, fnic->name, fnic); + break; + + case VNIC_DEV_INTR_MODE_MSIX: + + sprintf(fnic->msix[FNIC_MSIX_RQ].devname, + "%.11s-fcs-rq", 
fnic->name); + fnic->msix[FNIC_MSIX_RQ].isr = fnic_isr_msix_rq; + fnic->msix[FNIC_MSIX_RQ].devid = fnic; + + sprintf(fnic->msix[FNIC_MSIX_WQ].devname, + "%.11s-fcs-wq", fnic->name); + fnic->msix[FNIC_MSIX_WQ].isr = fnic_isr_msix_wq; + fnic->msix[FNIC_MSIX_WQ].devid = fnic; + + sprintf(fnic->msix[FNIC_MSIX_WQ_COPY].devname, + "%.11s-scsi-wq", fnic->name); + fnic->msix[FNIC_MSIX_WQ_COPY].isr = fnic_isr_msix_wq_copy; + fnic->msix[FNIC_MSIX_WQ_COPY].devid = fnic; + + sprintf(fnic->msix[FNIC_MSIX_ERR_NOTIFY].devname, + "%.11s-err-notify", fnic->name); + fnic->msix[FNIC_MSIX_ERR_NOTIFY].isr = + fnic_isr_msix_err_notify; + fnic->msix[FNIC_MSIX_ERR_NOTIFY].devid = fnic; + + for (i = 0; i < ARRAY_SIZE(fnic->msix); i++) { + err = request_irq(fnic->msix_entry[i].vector, + fnic->msix[i].isr, 0, + fnic->msix[i].devname, + fnic->msix[i].devid); + if (err) { + shost_printk(KERN_ERR, fnic->lport->host, + "MSIX: request_irq" + " failed %d\n", err); + fnic_free_intr(fnic); + break; + } + fnic->msix[i].requested = 1; + } + break; + + default: + break; + } + + return err; +} + +int fnic_set_intr_mode(struct fnic *fnic) +{ + unsigned int n = ARRAY_SIZE(fnic->rq); + unsigned int m = ARRAY_SIZE(fnic->wq); + unsigned int o = ARRAY_SIZE(fnic->wq_copy); + unsigned int i; + + /* + * Set interrupt mode (INTx, MSI, MSI-X) depending + * system capabilities. + * + * Try MSI-X first + * + * We need n RQs, m WQs, o Copy WQs, n+m+o CQs, and n+m+o+1 INTRs + * (last INTR is used for WQ/RQ errors and notification area) + */ + + BUG_ON(ARRAY_SIZE(fnic->msix_entry) < n + m + o + 1); + for (i = 0; i < n + m + o + 1; i++) + fnic->msix_entry[i].entry = i; + + if (fnic->rq_count >= n && + fnic->raw_wq_count >= m && + fnic->wq_copy_count >= o && + fnic->cq_count >= n + m + o) { + if (!pci_enable_msix(fnic->pdev, fnic->msix_entry, + n + m + o + 1)) { + fnic->rq_count = n; + fnic->raw_wq_count = m; + fnic->wq_copy_count = o; + fnic->wq_count = m + o; + fnic->cq_count = n + m + o; + fnic->intr_count = n + m + o + 1; + fnic->err_intr_offset = FNIC_MSIX_ERR_NOTIFY; + + FNIC_ISR_DBG(KERN_DEBUG, fnic->lport->host, + "Using MSI-X Interrupts\n"); + vnic_dev_set_intr_mode(fnic->vdev, + VNIC_DEV_INTR_MODE_MSIX); + return 0; + } + } + + /* + * Next try MSI + * We need 1 RQ, 1 WQ, 1 WQ_COPY, 3 CQs, and 1 INTR + */ + if (fnic->rq_count >= 1 && + fnic->raw_wq_count >= 1 && + fnic->wq_copy_count >= 1 && + fnic->cq_count >= 3 && + fnic->intr_count >= 1 && + !pci_enable_msi(fnic->pdev)) { + + fnic->rq_count = 1; + fnic->raw_wq_count = 1; + fnic->wq_copy_count = 1; + fnic->wq_count = 2; + fnic->cq_count = 3; + fnic->intr_count = 1; + fnic->err_intr_offset = 0; + + FNIC_ISR_DBG(KERN_DEBUG, fnic->lport->host, + "Using MSI Interrupts\n"); + vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_MSI); + + return 0; + } + + /* + * Next try INTx + * We need 1 RQ, 1 WQ, 1 WQ_COPY, 3 CQs, and 3 INTRs + * 1 INTR is used for all 3 queues, 1 INTR for queue errors + * 1 INTR for notification area + */ + + if (fnic->rq_count >= 1 && + fnic->raw_wq_count >= 1 && + fnic->wq_copy_count >= 1 && + fnic->cq_count >= 3 && + fnic->intr_count >= 3) { + + fnic->rq_count = 1; + fnic->raw_wq_count = 1; + fnic->wq_copy_count = 1; + fnic->cq_count = 3; + fnic->intr_count = 3; + + FNIC_ISR_DBG(KERN_DEBUG, fnic->lport->host, + "Using Legacy Interrupts\n"); + vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_INTX); + + return 0; + } + + vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN); + + return -EINVAL; +} + +void fnic_clear_intr_mode(struct fnic *fnic) +{ + 
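
fnic_set_intr_mode() above budgets vectors as n RQs + m WQs + o copy WQs + 1 error/notify interrupt for MSI-X, then falls back to a single-vector MSI setup and finally to the three-interrupt INTx layout. A simplified restatement of that decision (the _demo names are illustrative; success of pci_enable_msix()/pci_enable_msi() is folded into the msix_ok/msi_ok flags, and n, m, o are assumed to be 1 each, matching the queue arrays this driver sizes today):

    #include <stdio.h>

    enum intr_mode_demo { MODE_MSIX, MODE_MSI, MODE_INTX, MODE_NONE };

    static enum intr_mode_demo pick_mode_demo(int rq, int raw_wq, int wq_copy,
                                              int cq, int intr,
                                              int msix_ok, int msi_ok)
    {
        int n = 1, m = 1, o = 1;    /* RQs, raw WQs, copy WQs (assumed) */

        /* MSI-X: one vector per queue plus one for errors/notify. */
        if (msix_ok && rq >= n && raw_wq >= m && wq_copy >= o &&
            cq >= n + m + o && intr >= n + m + o + 1)
            return MODE_MSIX;

        /* MSI: everything serviced from a single vector. */
        if (msi_ok && rq >= 1 && raw_wq >= 1 && wq_copy >= 1 &&
            cq >= 3 && intr >= 1)
            return MODE_MSI;

        /* INTx: one interrupt for the queues, one for errors, one for
         * the notification area. */
        if (rq >= 1 && raw_wq >= 1 && wq_copy >= 1 && cq >= 3 && intr >= 3)
            return MODE_INTX;

        return MODE_NONE;
    }

    int main(void)
    {
        static const char *names[] = { "MSI-X", "MSI", "INTx", "none" };

        /* Enough of everything and MSI-X available: expect MSI-X. */
        printf("%s\n", names[pick_mode_demo(1, 1, 1, 3, 4, 1, 1)]);
        /* Only one interrupt resource and no MSI-X: expect MSI. */
        printf("%s\n", names[pick_mode_demo(1, 1, 1, 3, 1, 0, 1)]);
        return 0;
    }
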
switch (vnic_dev_get_intr_mode(fnic->vdev)) { + case VNIC_DEV_INTR_MODE_MSIX: + pci_disable_msix(fnic->pdev); + break; + case VNIC_DEV_INTR_MODE_MSI: + pci_disable_msi(fnic->pdev); + break; + default: + break; + } + + vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_INTX); +} + diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c new file mode 100644 index 00000000000..32ef6b87d89 --- /dev/null +++ b/drivers/scsi/fnic/fnic_main.c @@ -0,0 +1,942 @@ +/* + * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "vnic_dev.h" +#include "vnic_intr.h" +#include "vnic_stats.h" +#include "fnic_io.h" +#include "fnic.h" + +#define PCI_DEVICE_ID_CISCO_FNIC 0x0045 + +/* Timer to poll notification area for events. Used for MSI interrupts */ +#define FNIC_NOTIFY_TIMER_PERIOD (2 * HZ) + +static struct kmem_cache *fnic_sgl_cache[FNIC_SGL_NUM_CACHES]; +static struct kmem_cache *fnic_io_req_cache; +LIST_HEAD(fnic_list); +DEFINE_SPINLOCK(fnic_list_lock); + +/* Supported devices by fnic module */ +static struct pci_device_id fnic_id_table[] = { + { PCI_DEVICE(PCI_VENDOR_ID_CISCO, PCI_DEVICE_ID_CISCO_FNIC) }, + { 0, } +}; + +MODULE_DESCRIPTION(DRV_DESCRIPTION); +MODULE_AUTHOR("Abhijeet Joglekar , " + "Joseph R. 
Eykholt "); +MODULE_LICENSE("GPL v2"); +MODULE_VERSION(DRV_VERSION); +MODULE_DEVICE_TABLE(pci, fnic_id_table); + +unsigned int fnic_log_level; +module_param(fnic_log_level, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(fnic_log_level, "bit mask of fnic logging levels"); + + +static struct libfc_function_template fnic_transport_template = { + .frame_send = fnic_send, + .fcp_abort_io = fnic_empty_scsi_cleanup, + .fcp_cleanup = fnic_empty_scsi_cleanup, + .exch_mgr_reset = fnic_exch_mgr_reset +}; + +static int fnic_slave_alloc(struct scsi_device *sdev) +{ + struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); + struct fc_lport *lp = shost_priv(sdev->host); + struct fnic *fnic = lport_priv(lp); + + sdev->tagged_supported = 1; + + if (!rport || fc_remote_port_chkready(rport)) + return -ENXIO; + + scsi_activate_tcq(sdev, FNIC_DFLT_QUEUE_DEPTH); + rport->dev_loss_tmo = fnic->config.port_down_timeout / 1000; + + return 0; +} + +static struct scsi_host_template fnic_host_template = { + .module = THIS_MODULE, + .name = DRV_NAME, + .queuecommand = fnic_queuecommand, + .eh_abort_handler = fnic_abort_cmd, + .eh_device_reset_handler = fnic_device_reset, + .eh_host_reset_handler = fnic_host_reset, + .slave_alloc = fnic_slave_alloc, + .change_queue_depth = fc_change_queue_depth, + .change_queue_type = fc_change_queue_type, + .this_id = -1, + .cmd_per_lun = 3, + .can_queue = FNIC_MAX_IO_REQ, + .use_clustering = ENABLE_CLUSTERING, + .sg_tablesize = FNIC_MAX_SG_DESC_CNT, + .max_sectors = 0xffff, + .shost_attrs = fnic_attrs, +}; + +static void fnic_get_host_speed(struct Scsi_Host *shost); +static struct scsi_transport_template *fnic_fc_transport; +static struct fc_host_statistics *fnic_get_stats(struct Scsi_Host *); + +static struct fc_function_template fnic_fc_functions = { + + .show_host_node_name = 1, + .show_host_port_name = 1, + .show_host_supported_classes = 1, + .show_host_supported_fc4s = 1, + .show_host_active_fc4s = 1, + .show_host_maxframe_size = 1, + .show_host_port_id = 1, + .show_host_supported_speeds = 1, + .get_host_speed = fnic_get_host_speed, + .show_host_speed = 1, + .show_host_port_type = 1, + .get_host_port_state = fc_get_host_port_state, + .show_host_port_state = 1, + .show_host_symbolic_name = 1, + .show_rport_maxframe_size = 1, + .show_rport_supported_classes = 1, + .show_host_fabric_name = 1, + .show_starget_node_name = 1, + .show_starget_port_name = 1, + .show_starget_port_id = 1, + .show_rport_dev_loss_tmo = 1, + .issue_fc_host_lip = fnic_reset, + .get_fc_host_stats = fnic_get_stats, + .dd_fcrport_size = sizeof(struct fc_rport_libfc_priv), + .terminate_rport_io = fnic_terminate_rport_io, +}; + +static void fnic_get_host_speed(struct Scsi_Host *shost) +{ + struct fc_lport *lp = shost_priv(shost); + struct fnic *fnic = lport_priv(lp); + u32 port_speed = vnic_dev_port_speed(fnic->vdev); + + /* Add in other values as they get defined in fw */ + switch (port_speed) { + case 10000: + fc_host_speed(shost) = FC_PORTSPEED_10GBIT; + break; + default: + fc_host_speed(shost) = FC_PORTSPEED_10GBIT; + break; + } +} + +static struct fc_host_statistics *fnic_get_stats(struct Scsi_Host *host) +{ + int ret; + struct fc_lport *lp = shost_priv(host); + struct fnic *fnic = lport_priv(lp); + struct fc_host_statistics *stats = &lp->host_stats; + struct vnic_stats *vs; + unsigned long flags; + + if (time_before(jiffies, fnic->stats_time + HZ / FNIC_STATS_RATE_LIMIT)) + return stats; + fnic->stats_time = jiffies; + + spin_lock_irqsave(&fnic->fnic_lock, flags); + ret = vnic_dev_stats_dump(fnic->vdev, 
&fnic->stats); + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + if (ret) { + FNIC_MAIN_DBG(KERN_DEBUG, fnic->lport->host, + "fnic: Get vnic stats failed" + " 0x%x", ret); + return stats; + } + vs = fnic->stats; + stats->tx_frames = vs->tx.tx_unicast_frames_ok; + stats->tx_words = vs->tx.tx_unicast_bytes_ok / 4; + stats->rx_frames = vs->rx.rx_unicast_frames_ok; + stats->rx_words = vs->rx.rx_unicast_bytes_ok / 4; + stats->error_frames = vs->tx.tx_errors + vs->rx.rx_errors; + stats->dumped_frames = vs->tx.tx_drops + vs->rx.rx_drop; + stats->invalid_crc_count = vs->rx.rx_crc_errors; + stats->seconds_since_last_reset = (jiffies - lp->boot_time) / HZ; + stats->fcp_input_megabytes = div_u64(fnic->fcp_input_bytes, 1000000); + stats->fcp_output_megabytes = div_u64(fnic->fcp_output_bytes, 1000000); + + return stats; +} + +void fnic_log_q_error(struct fnic *fnic) +{ + unsigned int i; + u32 error_status; + + for (i = 0; i < fnic->raw_wq_count; i++) { + error_status = ioread32(&fnic->wq[i].ctrl->error_status); + if (error_status) + shost_printk(KERN_ERR, fnic->lport->host, + "WQ[%d] error_status" + " %d\n", i, error_status); + } + + for (i = 0; i < fnic->rq_count; i++) { + error_status = ioread32(&fnic->rq[i].ctrl->error_status); + if (error_status) + shost_printk(KERN_ERR, fnic->lport->host, + "RQ[%d] error_status" + " %d\n", i, error_status); + } + + for (i = 0; i < fnic->wq_copy_count; i++) { + error_status = ioread32(&fnic->wq_copy[i].ctrl->error_status); + if (error_status) + shost_printk(KERN_ERR, fnic->lport->host, + "CWQ[%d] error_status" + " %d\n", i, error_status); + } +} + +void fnic_handle_link_event(struct fnic *fnic) +{ + unsigned long flags; + + spin_lock_irqsave(&fnic->fnic_lock, flags); + if (fnic->stop_rx_link_events) { + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + return; + } + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + queue_work(fnic_event_queue, &fnic->link_work); + +} + +static int fnic_notify_set(struct fnic *fnic) +{ + int err; + + switch (vnic_dev_get_intr_mode(fnic->vdev)) { + case VNIC_DEV_INTR_MODE_INTX: + err = vnic_dev_notify_set(fnic->vdev, FNIC_INTX_NOTIFY); + break; + case VNIC_DEV_INTR_MODE_MSI: + err = vnic_dev_notify_set(fnic->vdev, -1); + break; + case VNIC_DEV_INTR_MODE_MSIX: + err = vnic_dev_notify_set(fnic->vdev, FNIC_MSIX_ERR_NOTIFY); + break; + default: + shost_printk(KERN_ERR, fnic->lport->host, + "Interrupt mode should be set up" + " before devcmd notify set %d\n", + vnic_dev_get_intr_mode(fnic->vdev)); + err = -1; + break; + } + + return err; +} + +static void fnic_notify_timer(unsigned long data) +{ + struct fnic *fnic = (struct fnic *)data; + + fnic_handle_link_event(fnic); + mod_timer(&fnic->notify_timer, + round_jiffies(jiffies + FNIC_NOTIFY_TIMER_PERIOD)); +} + +static void fnic_notify_timer_start(struct fnic *fnic) +{ + switch (vnic_dev_get_intr_mode(fnic->vdev)) { + case VNIC_DEV_INTR_MODE_MSI: + /* + * Schedule first timeout immediately. 
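
fnic_get_stats() above converts the raw vNIC counters into what the FC transport class expects: frame counts are reported as-is, byte counts become 4-byte "words", and FCP byte totals are reported in whole megabytes (div_u64() is used in the kernel so the 64-bit divide also works on 32-bit hosts). The arithmetic, isolated in plain C:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t tx_bytes = 123456789ULL;   /* e.g. vs->tx.tx_unicast_bytes_ok */

        uint64_t tx_words  = tx_bytes / 4;         /* stats->tx_words */
        uint64_t tx_mbytes = tx_bytes / 1000000;   /* stats->fcp_output_megabytes */

        printf("%" PRIu64 " bytes -> %" PRIu64 " words, %" PRIu64 " MB\n",
               tx_bytes, tx_words, tx_mbytes);
        return 0;
    }
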
The driver is + * initiatialized and ready to look for link up notification + */ + mod_timer(&fnic->notify_timer, jiffies); + break; + default: + /* Using intr for notification for INTx/MSI-X */ + break; + }; +} + +static int fnic_dev_wait(struct vnic_dev *vdev, + int (*start)(struct vnic_dev *, int), + int (*finished)(struct vnic_dev *, int *), + int arg) +{ + unsigned long time; + int done; + int err; + + err = start(vdev, arg); + if (err) + return err; + + /* Wait for func to complete...2 seconds max */ + time = jiffies + (HZ * 2); + do { + err = finished(vdev, &done); + if (err) + return err; + if (done) + return 0; + schedule_timeout_uninterruptible(HZ / 10); + } while (time_after(time, jiffies)); + + return -ETIMEDOUT; +} + +static int fnic_cleanup(struct fnic *fnic) +{ + unsigned int i; + int err; + unsigned long flags; + struct fc_frame *flogi = NULL; + struct fc_frame *flogi_resp = NULL; + + vnic_dev_disable(fnic->vdev); + for (i = 0; i < fnic->intr_count; i++) + vnic_intr_mask(&fnic->intr[i]); + + for (i = 0; i < fnic->rq_count; i++) { + err = vnic_rq_disable(&fnic->rq[i]); + if (err) + return err; + } + for (i = 0; i < fnic->raw_wq_count; i++) { + err = vnic_wq_disable(&fnic->wq[i]); + if (err) + return err; + } + for (i = 0; i < fnic->wq_copy_count; i++) { + err = vnic_wq_copy_disable(&fnic->wq_copy[i]); + if (err) + return err; + } + + /* Clean up completed IOs and FCS frames */ + fnic_wq_copy_cmpl_handler(fnic, -1); + fnic_wq_cmpl_handler(fnic, -1); + fnic_rq_cmpl_handler(fnic, -1); + + /* Clean up the IOs and FCS frames that have not completed */ + for (i = 0; i < fnic->raw_wq_count; i++) + vnic_wq_clean(&fnic->wq[i], fnic_free_wq_buf); + for (i = 0; i < fnic->rq_count; i++) + vnic_rq_clean(&fnic->rq[i], fnic_free_rq_buf); + for (i = 0; i < fnic->wq_copy_count; i++) + vnic_wq_copy_clean(&fnic->wq_copy[i], + fnic_wq_copy_cleanup_handler); + + for (i = 0; i < fnic->cq_count; i++) + vnic_cq_clean(&fnic->cq[i]); + for (i = 0; i < fnic->intr_count; i++) + vnic_intr_clean(&fnic->intr[i]); + + /* + * Remove cached flogi and flogi resp frames if any + * These frames are not in any queue, and therefore queue + * cleanup does not clean them. So clean them explicitly + */ + spin_lock_irqsave(&fnic->fnic_lock, flags); + flogi = fnic->flogi; + fnic->flogi = NULL; + flogi_resp = fnic->flogi_resp; + fnic->flogi_resp = NULL; + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + if (flogi) + dev_kfree_skb(fp_skb(flogi)); + + if (flogi_resp) + dev_kfree_skb(fp_skb(flogi_resp)); + + mempool_destroy(fnic->io_req_pool); + for (i = 0; i < FNIC_SGL_NUM_CACHES; i++) + mempool_destroy(fnic->io_sgl_pool[i]); + + return 0; +} + +static void fnic_iounmap(struct fnic *fnic) +{ + if (fnic->bar0.vaddr) + iounmap(fnic->bar0.vaddr); +} + +/* + * Allocate element for mempools requiring GFP_DMA flag. + * Otherwise, checks in kmem_flagcheck() hit BUG_ON(). 
+ */ +static void *fnic_alloc_slab_dma(gfp_t gfp_mask, void *pool_data) +{ + struct kmem_cache *mem = pool_data; + + return kmem_cache_alloc(mem, gfp_mask | GFP_ATOMIC | GFP_DMA); +} + +static int __devinit fnic_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct Scsi_Host *host; + struct fc_lport *lp; + struct fnic *fnic; + mempool_t *pool; + int err; + int i; + unsigned long flags; + + /* + * Allocate SCSI Host and set up association between host, + * local port, and fnic + */ + host = scsi_host_alloc(&fnic_host_template, + sizeof(struct fc_lport) + sizeof(struct fnic)); + if (!host) { + printk(KERN_ERR PFX "Unable to alloc SCSI host\n"); + err = -ENOMEM; + goto err_out; + } + lp = shost_priv(host); + lp->host = host; + fnic = lport_priv(lp); + fnic->lport = lp; + + snprintf(fnic->name, sizeof(fnic->name) - 1, "%s%d", DRV_NAME, + host->host_no); + + host->transportt = fnic_fc_transport; + + err = scsi_init_shared_tag_map(host, FNIC_MAX_IO_REQ); + if (err) { + shost_printk(KERN_ERR, fnic->lport->host, + "Unable to alloc shared tag map\n"); + goto err_out_free_hba; + } + + /* Setup PCI resources */ + pci_set_drvdata(pdev, fnic); + + fnic->pdev = pdev; + + err = pci_enable_device(pdev); + if (err) { + shost_printk(KERN_ERR, fnic->lport->host, + "Cannot enable PCI device, aborting.\n"); + goto err_out_free_hba; + } + + err = pci_request_regions(pdev, DRV_NAME); + if (err) { + shost_printk(KERN_ERR, fnic->lport->host, + "Cannot enable PCI resources, aborting\n"); + goto err_out_disable_device; + } + + pci_set_master(pdev); + + /* Query PCI controller on system for DMA addressing + * limitation for the device. Try 40-bit first, and + * fail to 32-bit. + */ + err = pci_set_dma_mask(pdev, DMA_40BIT_MASK); + if (err) { + err = pci_set_dma_mask(pdev, DMA_32BIT_MASK); + if (err) { + shost_printk(KERN_ERR, fnic->lport->host, + "No usable DMA configuration " + "aborting\n"); + goto err_out_release_regions; + } + err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); + if (err) { + shost_printk(KERN_ERR, fnic->lport->host, + "Unable to obtain 32-bit DMA " + "for consistent allocations, aborting.\n"); + goto err_out_release_regions; + } + } else { + err = pci_set_consistent_dma_mask(pdev, DMA_40BIT_MASK); + if (err) { + shost_printk(KERN_ERR, fnic->lport->host, + "Unable to obtain 40-bit DMA " + "for consistent allocations, aborting.\n"); + goto err_out_release_regions; + } + } + + /* Map vNIC resources from BAR0 */ + if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { + shost_printk(KERN_ERR, fnic->lport->host, + "BAR0 not memory-map'able, aborting.\n"); + err = -ENODEV; + goto err_out_release_regions; + } + + fnic->bar0.vaddr = pci_iomap(pdev, 0, 0); + fnic->bar0.bus_addr = pci_resource_start(pdev, 0); + fnic->bar0.len = pci_resource_len(pdev, 0); + + if (!fnic->bar0.vaddr) { + shost_printk(KERN_ERR, fnic->lport->host, + "Cannot memory-map BAR0 res hdr, " + "aborting.\n"); + err = -ENODEV; + goto err_out_release_regions; + } + + fnic->vdev = vnic_dev_register(NULL, fnic, pdev, &fnic->bar0); + if (!fnic->vdev) { + shost_printk(KERN_ERR, fnic->lport->host, + "vNIC registration failed, " + "aborting.\n"); + err = -ENODEV; + goto err_out_iounmap; + } + + err = fnic_dev_wait(fnic->vdev, vnic_dev_open, + vnic_dev_open_done, 0); + if (err) { + shost_printk(KERN_ERR, fnic->lport->host, + "vNIC dev open failed, aborting.\n"); + goto err_out_vnic_unregister; + } + + err = vnic_dev_init(fnic->vdev, 0); + if (err) { + shost_printk(KERN_ERR, fnic->lport->host, + "vNIC dev init failed, 
aborting.\n"); + goto err_out_dev_close; + } + + err = vnic_dev_mac_addr(fnic->vdev, fnic->mac_addr); + if (err) { + shost_printk(KERN_ERR, fnic->lport->host, + "vNIC get MAC addr failed \n"); + goto err_out_dev_close; + } + + /* Get vNIC configuration */ + err = fnic_get_vnic_config(fnic); + if (err) { + shost_printk(KERN_ERR, fnic->lport->host, + "Get vNIC configuration failed, " + "aborting.\n"); + goto err_out_dev_close; + } + host->max_lun = fnic->config.luns_per_tgt; + host->max_id = FNIC_MAX_FCP_TARGET; + + fnic_get_res_counts(fnic); + + err = fnic_set_intr_mode(fnic); + if (err) { + shost_printk(KERN_ERR, fnic->lport->host, + "Failed to set intr mode, " + "aborting.\n"); + goto err_out_dev_close; + } + + err = fnic_request_intr(fnic); + if (err) { + shost_printk(KERN_ERR, fnic->lport->host, + "Unable to request irq.\n"); + goto err_out_clear_intr; + } + + err = fnic_alloc_vnic_resources(fnic); + if (err) { + shost_printk(KERN_ERR, fnic->lport->host, + "Failed to alloc vNIC resources, " + "aborting.\n"); + goto err_out_free_intr; + } + + + /* initialize all fnic locks */ + spin_lock_init(&fnic->fnic_lock); + + for (i = 0; i < FNIC_WQ_MAX; i++) + spin_lock_init(&fnic->wq_lock[i]); + + for (i = 0; i < FNIC_WQ_COPY_MAX; i++) { + spin_lock_init(&fnic->wq_copy_lock[i]); + fnic->wq_copy_desc_low[i] = DESC_CLEAN_LOW_WATERMARK; + fnic->fw_ack_recd[i] = 0; + fnic->fw_ack_index[i] = -1; + } + + for (i = 0; i < FNIC_IO_LOCKS; i++) + spin_lock_init(&fnic->io_req_lock[i]); + + fnic->io_req_pool = mempool_create_slab_pool(2, fnic_io_req_cache); + if (!fnic->io_req_pool) + goto err_out_free_resources; + + pool = mempool_create(2, fnic_alloc_slab_dma, mempool_free_slab, + fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]); + if (!pool) + goto err_out_free_ioreq_pool; + fnic->io_sgl_pool[FNIC_SGL_CACHE_DFLT] = pool; + + pool = mempool_create(2, fnic_alloc_slab_dma, mempool_free_slab, + fnic_sgl_cache[FNIC_SGL_CACHE_MAX]); + if (!pool) + goto err_out_free_dflt_pool; + fnic->io_sgl_pool[FNIC_SGL_CACHE_MAX] = pool; + + /* setup vlan config, hw inserts vlan header */ + fnic->vlan_hw_insert = 1; + fnic->vlan_id = 0; + + fnic->flogi_oxid = FC_XID_UNKNOWN; + fnic->flogi = NULL; + fnic->flogi_resp = NULL; + fnic->state = FNIC_IN_FC_MODE; + + /* Enable hardware stripping of vlan header on ingress */ + fnic_set_nic_config(fnic, 0, 0, 0, 0, 0, 0, 1); + + /* Setup notification buffer area */ + err = fnic_notify_set(fnic); + if (err) { + shost_printk(KERN_ERR, fnic->lport->host, + "Failed to alloc notify buffer, aborting.\n"); + goto err_out_free_max_pool; + } + + /* Setup notify timer when using MSI interrupts */ + if (vnic_dev_get_intr_mode(fnic->vdev) == VNIC_DEV_INTR_MODE_MSI) + setup_timer(&fnic->notify_timer, + fnic_notify_timer, (unsigned long)fnic); + + /* allocate RQ buffers and post them to RQ*/ + for (i = 0; i < fnic->rq_count; i++) { + err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame); + if (err) { + shost_printk(KERN_ERR, fnic->lport->host, + "fnic_alloc_rq_frame can't alloc " + "frame\n"); + goto err_out_free_rq_buf; + } + } + + /* + * Initialization done with PCI system, hardware, firmware. 
+ * Add host to SCSI + */ + err = scsi_add_host(lp->host, &pdev->dev); + if (err) { + shost_printk(KERN_ERR, fnic->lport->host, + "fnic: scsi_add_host failed...exiting\n"); + goto err_out_free_rq_buf; + } + + /* Start local port initiatialization */ + + lp->link_up = 0; + lp->tt = fnic_transport_template; + + lp->emp = fc_exch_mgr_alloc(lp, FC_CLASS_3, + FCPIO_HOST_EXCH_RANGE_START, + FCPIO_HOST_EXCH_RANGE_END); + if (!lp->emp) { + err = -ENOMEM; + goto err_out_remove_scsi_host; + } + + lp->max_retry_count = fnic->config.flogi_retries; + lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS | + FCP_SPPF_CONF_COMPL); + if (fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR) + lp->service_params |= FCP_SPPF_RETRY; + + lp->boot_time = jiffies; + lp->e_d_tov = fnic->config.ed_tov; + lp->r_a_tov = fnic->config.ra_tov; + lp->link_supported_speeds = FC_PORTSPEED_10GBIT; + fc_set_wwnn(lp, fnic->config.node_wwn); + fc_set_wwpn(lp, fnic->config.port_wwn); + + fc_exch_init(lp); + fc_lport_init(lp); + fc_elsct_init(lp); + fc_rport_init(lp); + fc_disc_init(lp); + + fc_lport_config(lp); + + if (fc_set_mfs(lp, fnic->config.maxdatafieldsize + + sizeof(struct fc_frame_header))) { + err = -EINVAL; + goto err_out_free_exch_mgr; + } + fc_host_maxframe_size(lp->host) = lp->mfs; + + sprintf(fc_host_symbolic_name(lp->host), + DRV_NAME " v" DRV_VERSION " over %s", fnic->name); + + spin_lock_irqsave(&fnic_list_lock, flags); + list_add_tail(&fnic->list, &fnic_list); + spin_unlock_irqrestore(&fnic_list_lock, flags); + + INIT_WORK(&fnic->link_work, fnic_handle_link); + INIT_WORK(&fnic->frame_work, fnic_handle_frame); + skb_queue_head_init(&fnic->frame_queue); + + /* Enable all queues */ + for (i = 0; i < fnic->raw_wq_count; i++) + vnic_wq_enable(&fnic->wq[i]); + for (i = 0; i < fnic->rq_count; i++) + vnic_rq_enable(&fnic->rq[i]); + for (i = 0; i < fnic->wq_copy_count; i++) + vnic_wq_copy_enable(&fnic->wq_copy[i]); + + fc_fabric_login(lp); + + vnic_dev_enable(fnic->vdev); + for (i = 0; i < fnic->intr_count; i++) + vnic_intr_unmask(&fnic->intr[i]); + + fnic_notify_timer_start(fnic); + + return 0; + +err_out_free_exch_mgr: + fc_exch_mgr_free(lp->emp); +err_out_remove_scsi_host: + fc_remove_host(fnic->lport->host); + scsi_remove_host(fnic->lport->host); +err_out_free_rq_buf: + for (i = 0; i < fnic->rq_count; i++) + vnic_rq_clean(&fnic->rq[i], fnic_free_rq_buf); + vnic_dev_notify_unset(fnic->vdev); +err_out_free_max_pool: + mempool_destroy(fnic->io_sgl_pool[FNIC_SGL_CACHE_MAX]); +err_out_free_dflt_pool: + mempool_destroy(fnic->io_sgl_pool[FNIC_SGL_CACHE_DFLT]); +err_out_free_ioreq_pool: + mempool_destroy(fnic->io_req_pool); +err_out_free_resources: + fnic_free_vnic_resources(fnic); +err_out_free_intr: + fnic_free_intr(fnic); +err_out_clear_intr: + fnic_clear_intr_mode(fnic); +err_out_dev_close: + vnic_dev_close(fnic->vdev); +err_out_vnic_unregister: + vnic_dev_unregister(fnic->vdev); +err_out_iounmap: + fnic_iounmap(fnic); +err_out_release_regions: + pci_release_regions(pdev); +err_out_disable_device: + pci_disable_device(pdev); +err_out_free_hba: + scsi_host_put(lp->host); +err_out: + return err; +} + +static void __devexit fnic_remove(struct pci_dev *pdev) +{ + struct fnic *fnic = pci_get_drvdata(pdev); + unsigned long flags; + + /* + * Mark state so that the workqueue thread stops forwarding + * received frames and link events to the local port. 
ISR and + * other threads that can queue work items will also stop + * creating work items on the fnic workqueue + */ + spin_lock_irqsave(&fnic->fnic_lock, flags); + fnic->stop_rx_link_events = 1; + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + if (vnic_dev_get_intr_mode(fnic->vdev) == VNIC_DEV_INTR_MODE_MSI) + del_timer_sync(&fnic->notify_timer); + + /* + * Flush the fnic event queue. After this call, there should + * be no event queued for this fnic device in the workqueue + */ + flush_workqueue(fnic_event_queue); + skb_queue_purge(&fnic->frame_queue); + + /* + * Log off the fabric. This stops all remote ports, dns port, + * logs off the fabric. This flushes all rport, disc, lport work + * before returning + */ + fc_fabric_logoff(fnic->lport); + + spin_lock_irqsave(&fnic->fnic_lock, flags); + fnic->in_remove = 1; + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + fc_lport_destroy(fnic->lport); + + /* + * This stops the fnic device, masks all interrupts. Completed + * CQ entries are drained. Posted WQ/RQ/Copy-WQ entries are + * cleaned up + */ + fnic_cleanup(fnic); + + BUG_ON(!skb_queue_empty(&fnic->frame_queue)); + + spin_lock_irqsave(&fnic_list_lock, flags); + list_del(&fnic->list); + spin_unlock_irqrestore(&fnic_list_lock, flags); + + fc_remove_host(fnic->lport->host); + scsi_remove_host(fnic->lport->host); + fc_exch_mgr_free(fnic->lport->emp); + vnic_dev_notify_unset(fnic->vdev); + fnic_free_vnic_resources(fnic); + fnic_free_intr(fnic); + fnic_clear_intr_mode(fnic); + vnic_dev_close(fnic->vdev); + vnic_dev_unregister(fnic->vdev); + fnic_iounmap(fnic); + pci_release_regions(pdev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + scsi_host_put(fnic->lport->host); +} + +static struct pci_driver fnic_driver = { + .name = DRV_NAME, + .id_table = fnic_id_table, + .probe = fnic_probe, + .remove = __devexit_p(fnic_remove), +}; + +static int __init fnic_init_module(void) +{ + size_t len; + int err = 0; + + printk(KERN_INFO PFX "%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION); + + /* Create a cache for allocation of default size sgls */ + len = sizeof(struct fnic_dflt_sgl_list); + fnic_sgl_cache[FNIC_SGL_CACHE_DFLT] = kmem_cache_create + ("fnic_sgl_dflt", len + FNIC_SG_DESC_ALIGN, FNIC_SG_DESC_ALIGN, + SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA, + NULL); + if (!fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]) { + printk(KERN_ERR PFX "failed to create fnic dflt sgl slab\n"); + err = -ENOMEM; + goto err_create_fnic_sgl_slab_dflt; + } + + /* Create a cache for allocation of max size sgls*/ + len = sizeof(struct fnic_sgl_list); + fnic_sgl_cache[FNIC_SGL_CACHE_MAX] = kmem_cache_create + ("fnic_sgl_max", len + FNIC_SG_DESC_ALIGN, FNIC_SG_DESC_ALIGN, + SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA, + NULL); + if (!fnic_sgl_cache[FNIC_SGL_CACHE_MAX]) { + printk(KERN_ERR PFX "failed to create fnic max sgl slab\n"); + err = -ENOMEM; + goto err_create_fnic_sgl_slab_max; + } + + /* Create a cache of io_req structs for use via mempool */ + fnic_io_req_cache = kmem_cache_create("fnic_io_req", + sizeof(struct fnic_io_req), + 0, SLAB_HWCACHE_ALIGN, NULL); + if (!fnic_io_req_cache) { + printk(KERN_ERR PFX "failed to create fnic io_req slab\n"); + err = -ENOMEM; + goto err_create_fnic_ioreq_slab; + } + + fnic_event_queue = create_singlethread_workqueue("fnic_event_wq"); + if (!fnic_event_queue) { + printk(KERN_ERR PFX "fnic work queue create failed\n"); + err = -ENOMEM; + goto err_create_fnic_workq; + } + + spin_lock_init(&fnic_list_lock); + INIT_LIST_HEAD(&fnic_list); + + fnic_fc_transport = 
fc_attach_transport(&fnic_fc_functions); + if (!fnic_fc_transport) { + printk(KERN_ERR PFX "fc_attach_transport error\n"); + err = -ENOMEM; + goto err_fc_transport; + } + + /* register the driver with PCI system */ + err = pci_register_driver(&fnic_driver); + if (err < 0) { + printk(KERN_ERR PFX "pci register error\n"); + goto err_pci_register; + } + return err; + +err_pci_register: + fc_release_transport(fnic_fc_transport); +err_fc_transport: + destroy_workqueue(fnic_event_queue); +err_create_fnic_workq: + kmem_cache_destroy(fnic_io_req_cache); +err_create_fnic_ioreq_slab: + kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_MAX]); +err_create_fnic_sgl_slab_max: + kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]); +err_create_fnic_sgl_slab_dflt: + return err; +} + +static void __exit fnic_cleanup_module(void) +{ + pci_unregister_driver(&fnic_driver); + destroy_workqueue(fnic_event_queue); + kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_MAX]); + kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]); + kmem_cache_destroy(fnic_io_req_cache); + fc_release_transport(fnic_fc_transport); +} + +module_init(fnic_init_module); +module_exit(fnic_cleanup_module); + diff --git a/drivers/scsi/fnic/fnic_res.c b/drivers/scsi/fnic/fnic_res.c new file mode 100644 index 00000000000..7ba61ec715d --- /dev/null +++ b/drivers/scsi/fnic/fnic_res.c @@ -0,0 +1,444 @@ +/* + * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
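
The two SGL slab caches created in fnic_init_module() above are sized at the structure length plus FNIC_SG_DESC_ALIGN. The usual reason for that extra headroom is so a consumer can round an allocation up to the required 16-byte boundary and still have a full-sized descriptor array inside the object; the round-up itself happens in the I/O path, which is outside this hunk, so treat that reading as an assumption. A minimal sketch of the pointer arithmetic:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define SG_DESC_ALIGN_DEMO 16   /* mirrors FNIC_SG_DESC_ALIGN */

    /* Round p up to the next SG_DESC_ALIGN_DEMO boundary. */
    static void *align_up_demo(void *p)
    {
        uintptr_t v = (uintptr_t)p;

        return (void *)((v + SG_DESC_ALIGN_DEMO - 1) &
                        ~(uintptr_t)(SG_DESC_ALIGN_DEMO - 1));
    }

    int main(void)
    {
        size_t payload = 32 * 16;    /* e.g. 32 sixteen-byte descriptors */
        void *raw = malloc(payload + SG_DESC_ALIGN_DEMO);  /* len + align */
        void *aligned;

        if (!raw)
            return 1;
        aligned = align_up_demo(raw);
        printf("raw=%p aligned=%p offset=%zu\n", raw, aligned,
               (size_t)((char *)aligned - (char *)raw));
        free(raw);   /* always free the original, unaligned pointer */
        return 0;
    }
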
+ */ +#include +#include +#include +#include "wq_enet_desc.h" +#include "rq_enet_desc.h" +#include "cq_enet_desc.h" +#include "vnic_resource.h" +#include "vnic_dev.h" +#include "vnic_wq.h" +#include "vnic_rq.h" +#include "vnic_cq.h" +#include "vnic_intr.h" +#include "vnic_stats.h" +#include "vnic_nic.h" +#include "fnic.h" + +int fnic_get_vnic_config(struct fnic *fnic) +{ + struct vnic_fc_config *c = &fnic->config; + int err; + +#define GET_CONFIG(m) \ + do { \ + err = vnic_dev_spec(fnic->vdev, \ + offsetof(struct vnic_fc_config, m), \ + sizeof(c->m), &c->m); \ + if (err) { \ + shost_printk(KERN_ERR, fnic->lport->host, \ + "Error getting %s, %d\n", #m, \ + err); \ + return err; \ + } \ + } while (0); + + GET_CONFIG(node_wwn); + GET_CONFIG(port_wwn); + GET_CONFIG(wq_enet_desc_count); + GET_CONFIG(wq_copy_desc_count); + GET_CONFIG(rq_desc_count); + GET_CONFIG(maxdatafieldsize); + GET_CONFIG(ed_tov); + GET_CONFIG(ra_tov); + GET_CONFIG(intr_timer); + GET_CONFIG(intr_timer_type); + GET_CONFIG(flags); + GET_CONFIG(flogi_retries); + GET_CONFIG(flogi_timeout); + GET_CONFIG(plogi_retries); + GET_CONFIG(plogi_timeout); + GET_CONFIG(io_throttle_count); + GET_CONFIG(link_down_timeout); + GET_CONFIG(port_down_timeout); + GET_CONFIG(port_down_io_retries); + GET_CONFIG(luns_per_tgt); + + c->wq_enet_desc_count = + min_t(u32, VNIC_FNIC_WQ_DESCS_MAX, + max_t(u32, VNIC_FNIC_WQ_DESCS_MIN, + c->wq_enet_desc_count)); + c->wq_enet_desc_count = ALIGN(c->wq_enet_desc_count, 16); + + c->wq_copy_desc_count = + min_t(u32, VNIC_FNIC_WQ_COPY_DESCS_MAX, + max_t(u32, VNIC_FNIC_WQ_COPY_DESCS_MIN, + c->wq_copy_desc_count)); + c->wq_copy_desc_count = ALIGN(c->wq_copy_desc_count, 16); + + c->rq_desc_count = + min_t(u32, VNIC_FNIC_RQ_DESCS_MAX, + max_t(u32, VNIC_FNIC_RQ_DESCS_MIN, + c->rq_desc_count)); + c->rq_desc_count = ALIGN(c->rq_desc_count, 16); + + c->maxdatafieldsize = + min_t(u16, VNIC_FNIC_MAXDATAFIELDSIZE_MAX, + max_t(u16, VNIC_FNIC_MAXDATAFIELDSIZE_MIN, + c->maxdatafieldsize)); + c->ed_tov = + min_t(u32, VNIC_FNIC_EDTOV_MAX, + max_t(u32, VNIC_FNIC_EDTOV_MIN, + c->ed_tov)); + + c->ra_tov = + min_t(u32, VNIC_FNIC_RATOV_MAX, + max_t(u32, VNIC_FNIC_RATOV_MIN, + c->ra_tov)); + + c->flogi_retries = + min_t(u32, VNIC_FNIC_FLOGI_RETRIES_MAX, c->flogi_retries); + + c->flogi_timeout = + min_t(u32, VNIC_FNIC_FLOGI_TIMEOUT_MAX, + max_t(u32, VNIC_FNIC_FLOGI_TIMEOUT_MIN, + c->flogi_timeout)); + + c->plogi_retries = + min_t(u32, VNIC_FNIC_PLOGI_RETRIES_MAX, c->plogi_retries); + + c->plogi_timeout = + min_t(u32, VNIC_FNIC_PLOGI_TIMEOUT_MAX, + max_t(u32, VNIC_FNIC_PLOGI_TIMEOUT_MIN, + c->plogi_timeout)); + + c->io_throttle_count = + min_t(u32, VNIC_FNIC_IO_THROTTLE_COUNT_MAX, + max_t(u32, VNIC_FNIC_IO_THROTTLE_COUNT_MIN, + c->io_throttle_count)); + + c->link_down_timeout = + min_t(u32, VNIC_FNIC_LINK_DOWN_TIMEOUT_MAX, + c->link_down_timeout); + + c->port_down_timeout = + min_t(u32, VNIC_FNIC_PORT_DOWN_TIMEOUT_MAX, + c->port_down_timeout); + + c->port_down_io_retries = + min_t(u32, VNIC_FNIC_PORT_DOWN_IO_RETRIES_MAX, + c->port_down_io_retries); + + c->luns_per_tgt = + min_t(u32, VNIC_FNIC_LUNS_PER_TARGET_MAX, + max_t(u32, VNIC_FNIC_LUNS_PER_TARGET_MIN, + c->luns_per_tgt)); + + c->intr_timer = min_t(u16, VNIC_INTR_TIMER_MAX, c->intr_timer); + c->intr_timer_type = c->intr_timer_type; + + shost_printk(KERN_INFO, fnic->lport->host, + "vNIC MAC addr %02x:%02x:%02x:%02x:%02x:%02x " + "wq/wq_copy/rq %d/%d/%d\n", + fnic->mac_addr[0], fnic->mac_addr[1], fnic->mac_addr[2], + fnic->mac_addr[3], fnic->mac_addr[4], fnic->mac_addr[5], + 
c->wq_enet_desc_count, c->wq_copy_desc_count, + c->rq_desc_count); + shost_printk(KERN_INFO, fnic->lport->host, + "vNIC node wwn %llx port wwn %llx\n", + c->node_wwn, c->port_wwn); + shost_printk(KERN_INFO, fnic->lport->host, + "vNIC ed_tov %d ra_tov %d\n", + c->ed_tov, c->ra_tov); + shost_printk(KERN_INFO, fnic->lport->host, + "vNIC mtu %d intr timer %d\n", + c->maxdatafieldsize, c->intr_timer); + shost_printk(KERN_INFO, fnic->lport->host, + "vNIC flags 0x%x luns per tgt %d\n", + c->flags, c->luns_per_tgt); + shost_printk(KERN_INFO, fnic->lport->host, + "vNIC flogi_retries %d flogi timeout %d\n", + c->flogi_retries, c->flogi_timeout); + shost_printk(KERN_INFO, fnic->lport->host, + "vNIC plogi retries %d plogi timeout %d\n", + c->plogi_retries, c->plogi_timeout); + shost_printk(KERN_INFO, fnic->lport->host, + "vNIC io throttle count %d link dn timeout %d\n", + c->io_throttle_count, c->link_down_timeout); + shost_printk(KERN_INFO, fnic->lport->host, + "vNIC port dn io retries %d port dn timeout %d\n", + c->port_down_io_retries, c->port_down_timeout); + + return 0; +} + +int fnic_set_nic_config(struct fnic *fnic, u8 rss_default_cpu, + u8 rss_hash_type, + u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable, + u8 tso_ipid_split_en, u8 ig_vlan_strip_en) +{ + u64 a0, a1; + u32 nic_cfg; + int wait = 1000; + + vnic_set_nic_cfg(&nic_cfg, rss_default_cpu, + rss_hash_type, rss_hash_bits, rss_base_cpu, + rss_enable, tso_ipid_split_en, ig_vlan_strip_en); + + a0 = nic_cfg; + a1 = 0; + + return vnic_dev_cmd(fnic->vdev, CMD_NIC_CFG, &a0, &a1, wait); +} + +void fnic_get_res_counts(struct fnic *fnic) +{ + fnic->wq_count = vnic_dev_get_res_count(fnic->vdev, RES_TYPE_WQ); + fnic->raw_wq_count = fnic->wq_count - 1; + fnic->wq_copy_count = fnic->wq_count - fnic->raw_wq_count; + fnic->rq_count = vnic_dev_get_res_count(fnic->vdev, RES_TYPE_RQ); + fnic->cq_count = vnic_dev_get_res_count(fnic->vdev, RES_TYPE_CQ); + fnic->intr_count = vnic_dev_get_res_count(fnic->vdev, + RES_TYPE_INTR_CTRL); +} + +void fnic_free_vnic_resources(struct fnic *fnic) +{ + unsigned int i; + + for (i = 0; i < fnic->raw_wq_count; i++) + vnic_wq_free(&fnic->wq[i]); + + for (i = 0; i < fnic->wq_copy_count; i++) + vnic_wq_copy_free(&fnic->wq_copy[i]); + + for (i = 0; i < fnic->rq_count; i++) + vnic_rq_free(&fnic->rq[i]); + + for (i = 0; i < fnic->cq_count; i++) + vnic_cq_free(&fnic->cq[i]); + + for (i = 0; i < fnic->intr_count; i++) + vnic_intr_free(&fnic->intr[i]); +} + +int fnic_alloc_vnic_resources(struct fnic *fnic) +{ + enum vnic_dev_intr_mode intr_mode; + unsigned int mask_on_assertion; + unsigned int interrupt_offset; + unsigned int error_interrupt_enable; + unsigned int error_interrupt_offset; + unsigned int i, cq_index; + unsigned int wq_copy_cq_desc_count; + int err; + + intr_mode = vnic_dev_get_intr_mode(fnic->vdev); + + shost_printk(KERN_INFO, fnic->lport->host, "vNIC interrupt mode: %s\n", + intr_mode == VNIC_DEV_INTR_MODE_INTX ? "legacy PCI INTx" : + intr_mode == VNIC_DEV_INTR_MODE_MSI ? "MSI" : + intr_mode == VNIC_DEV_INTR_MODE_MSIX ? 
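
Every value fnic_get_vnic_config() reads from the firmware is clamped into a [MIN, MAX] window with min_t()/max_t(), and the descriptor counts are additionally rounded up to a multiple of 16 with ALIGN(). The same clamp-then-round pattern in isolation (the DEMO_* limits below are placeholders, not the real VNIC_FNIC_* bounds):

    #include <stdio.h>

    #define DEMO_WQ_DESCS_MIN   64u     /* placeholder lower bound */
    #define DEMO_WQ_DESCS_MAX 4096u     /* placeholder upper bound */

    static unsigned int clamp_u32(unsigned int v, unsigned int lo,
                                  unsigned int hi)
    {
        return v < lo ? lo : (v > hi ? hi : v);
    }

    /* ALIGN(x, 16): round up to the next multiple of 16. */
    static unsigned int align16(unsigned int v)
    {
        return (v + 15u) & ~15u;
    }

    int main(void)
    {
        unsigned int from_fw = 70;   /* pretend value read via vnic_dev_spec() */
        unsigned int count;

        count = align16(clamp_u32(from_fw, DEMO_WQ_DESCS_MIN,
                                  DEMO_WQ_DESCS_MAX));
        printf("wq_enet_desc_count: %u -> %u\n", from_fw, count);  /* 70 -> 80 */
        return 0;
    }
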
+ "MSI-X" : "unknown"); + + shost_printk(KERN_INFO, fnic->lport->host, "vNIC resources avail: " + "wq %d cp_wq %d raw_wq %d rq %d cq %d intr %d\n", + fnic->wq_count, fnic->wq_copy_count, fnic->raw_wq_count, + fnic->rq_count, fnic->cq_count, fnic->intr_count); + + /* Allocate Raw WQ used for FCS frames */ + for (i = 0; i < fnic->raw_wq_count; i++) { + err = vnic_wq_alloc(fnic->vdev, &fnic->wq[i], i, + fnic->config.wq_enet_desc_count, + sizeof(struct wq_enet_desc)); + if (err) + goto err_out_cleanup; + } + + /* Allocate Copy WQs used for SCSI IOs */ + for (i = 0; i < fnic->wq_copy_count; i++) { + err = vnic_wq_copy_alloc(fnic->vdev, &fnic->wq_copy[i], + (fnic->raw_wq_count + i), + fnic->config.wq_copy_desc_count, + sizeof(struct fcpio_host_req)); + if (err) + goto err_out_cleanup; + } + + /* RQ for receiving FCS frames */ + for (i = 0; i < fnic->rq_count; i++) { + err = vnic_rq_alloc(fnic->vdev, &fnic->rq[i], i, + fnic->config.rq_desc_count, + sizeof(struct rq_enet_desc)); + if (err) + goto err_out_cleanup; + } + + /* CQ for each RQ */ + for (i = 0; i < fnic->rq_count; i++) { + cq_index = i; + err = vnic_cq_alloc(fnic->vdev, + &fnic->cq[cq_index], cq_index, + fnic->config.rq_desc_count, + sizeof(struct cq_enet_rq_desc)); + if (err) + goto err_out_cleanup; + } + + /* CQ for each WQ */ + for (i = 0; i < fnic->raw_wq_count; i++) { + cq_index = fnic->rq_count + i; + err = vnic_cq_alloc(fnic->vdev, &fnic->cq[cq_index], cq_index, + fnic->config.wq_enet_desc_count, + sizeof(struct cq_enet_wq_desc)); + if (err) + goto err_out_cleanup; + } + + /* CQ for each COPY WQ */ + wq_copy_cq_desc_count = (fnic->config.wq_copy_desc_count * 3); + for (i = 0; i < fnic->wq_copy_count; i++) { + cq_index = fnic->raw_wq_count + fnic->rq_count + i; + err = vnic_cq_alloc(fnic->vdev, &fnic->cq[cq_index], + cq_index, + wq_copy_cq_desc_count, + sizeof(struct fcpio_fw_req)); + if (err) + goto err_out_cleanup; + } + + for (i = 0; i < fnic->intr_count; i++) { + err = vnic_intr_alloc(fnic->vdev, &fnic->intr[i], i); + if (err) + goto err_out_cleanup; + } + + fnic->legacy_pba = vnic_dev_get_res(fnic->vdev, + RES_TYPE_INTR_PBA_LEGACY, 0); + + if (!fnic->legacy_pba && intr_mode == VNIC_DEV_INTR_MODE_INTX) { + shost_printk(KERN_ERR, fnic->lport->host, + "Failed to hook legacy pba resource\n"); + err = -ENODEV; + goto err_out_cleanup; + } + + /* + * Init RQ/WQ resources. + * + * RQ[0 to n-1] point to CQ[0 to n-1] + * WQ[0 to m-1] point to CQ[n to n+m-1] + * WQ_COPY[0 to k-1] points to CQ[n+m to n+m+k-1] + * + * Note for copy wq we always initialize with cq_index = 0 + * + * Error interrupt is not enabled for MSI. 
+ */ + + switch (intr_mode) { + case VNIC_DEV_INTR_MODE_INTX: + case VNIC_DEV_INTR_MODE_MSIX: + error_interrupt_enable = 1; + error_interrupt_offset = fnic->err_intr_offset; + break; + default: + error_interrupt_enable = 0; + error_interrupt_offset = 0; + break; + } + + for (i = 0; i < fnic->rq_count; i++) { + cq_index = i; + vnic_rq_init(&fnic->rq[i], + cq_index, + error_interrupt_enable, + error_interrupt_offset); + } + + for (i = 0; i < fnic->raw_wq_count; i++) { + cq_index = i + fnic->rq_count; + vnic_wq_init(&fnic->wq[i], + cq_index, + error_interrupt_enable, + error_interrupt_offset); + } + + for (i = 0; i < fnic->wq_copy_count; i++) { + vnic_wq_copy_init(&fnic->wq_copy[i], + 0 /* cq_index 0 - always */, + error_interrupt_enable, + error_interrupt_offset); + } + + for (i = 0; i < fnic->cq_count; i++) { + + switch (intr_mode) { + case VNIC_DEV_INTR_MODE_MSIX: + interrupt_offset = i; + break; + default: + interrupt_offset = 0; + break; + } + + vnic_cq_init(&fnic->cq[i], + 0 /* flow_control_enable */, + 1 /* color_enable */, + 0 /* cq_head */, + 0 /* cq_tail */, + 1 /* cq_tail_color */, + 1 /* interrupt_enable */, + 1 /* cq_entry_enable */, + 0 /* cq_message_enable */, + interrupt_offset, + 0 /* cq_message_addr */); + } + + /* + * Init INTR resources + * + * mask_on_assertion is not used for INTx due to the level- + * triggered nature of INTx + */ + + switch (intr_mode) { + case VNIC_DEV_INTR_MODE_MSI: + case VNIC_DEV_INTR_MODE_MSIX: + mask_on_assertion = 1; + break; + default: + mask_on_assertion = 0; + break; + } + + for (i = 0; i < fnic->intr_count; i++) { + vnic_intr_init(&fnic->intr[i], + fnic->config.intr_timer, + fnic->config.intr_timer_type, + mask_on_assertion); + } + + /* init the stats memory by making the first call here */ + err = vnic_dev_stats_dump(fnic->vdev, &fnic->stats); + if (err) { + shost_printk(KERN_ERR, fnic->lport->host, + "vnic_dev_stats_dump failed - x%x\n", err); + goto err_out_cleanup; + } + + /* Clear LIF stats */ + vnic_dev_stats_clear(fnic->vdev); + + return 0; + +err_out_cleanup: + fnic_free_vnic_resources(fnic); + + return err; +} diff --git a/drivers/scsi/fnic/fnic_res.h b/drivers/scsi/fnic/fnic_res.h new file mode 100644 index 00000000000..b6f31026253 --- /dev/null +++ b/drivers/scsi/fnic/fnic_res.h @@ -0,0 +1,197 @@ +/* + * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#ifndef _FNIC_RES_H_ +#define _FNIC_RES_H_ + +#include "wq_enet_desc.h" +#include "rq_enet_desc.h" +#include "vnic_wq.h" +#include "vnic_rq.h" +#include "fnic_io.h" +#include "fcpio.h" +#include "vnic_wq_copy.h" +#include "vnic_cq_copy.h" + +static inline void fnic_queue_wq_desc(struct vnic_wq *wq, + void *os_buf, dma_addr_t dma_addr, + unsigned int len, unsigned int fc_eof, + int vlan_tag_insert, + unsigned int vlan_tag, + int cq_entry, int sop, int eop) +{ + struct wq_enet_desc *desc = vnic_wq_next_desc(wq); + + wq_enet_desc_enc(desc, + (u64)dma_addr | VNIC_PADDR_TARGET, + (u16)len, + 0, /* mss_or_csum_offset */ + (u16)fc_eof, + 0, /* offload_mode */ + (u8)eop, (u8)cq_entry, + 1, /* fcoe_encap */ + (u8)vlan_tag_insert, + (u16)vlan_tag, + 0 /* loopback */); + + vnic_wq_post(wq, os_buf, dma_addr, len, sop, eop); +} + +static inline void fnic_queue_wq_copy_desc_icmnd_16(struct vnic_wq_copy *wq, + u32 req_id, + u32 lunmap_id, u8 spl_flags, + u32 sgl_cnt, u32 sense_len, + u64 sgl_addr, u64 sns_addr, + u8 crn, u8 pri_ta, + u8 flags, u8 *scsi_cdb, + u32 data_len, u8 *lun, + u32 d_id, u16 mss, + u32 ratov, u32 edtov) +{ + struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq); + + desc->hdr.type = FCPIO_ICMND_16; /* enum fcpio_type */ + desc->hdr.status = 0; /* header status entry */ + desc->hdr._resvd = 0; /* reserved */ + desc->hdr.tag.u.req_id = req_id; /* id for this request */ + + desc->u.icmnd_16.lunmap_id = lunmap_id; /* index into lunmap table */ + desc->u.icmnd_16.special_req_flags = spl_flags; /* exch req flags */ + desc->u.icmnd_16._resvd0[0] = 0; /* reserved */ + desc->u.icmnd_16._resvd0[1] = 0; /* reserved */ + desc->u.icmnd_16._resvd0[2] = 0; /* reserved */ + desc->u.icmnd_16.sgl_cnt = sgl_cnt; /* scatter-gather list count */ + desc->u.icmnd_16.sense_len = sense_len; /* sense buffer length */ + desc->u.icmnd_16.sgl_addr = sgl_addr; /* scatter-gather list addr */ + desc->u.icmnd_16.sense_addr = sns_addr; /* sense buffer address */ + desc->u.icmnd_16.crn = crn; /* SCSI Command Reference No.*/ + desc->u.icmnd_16.pri_ta = pri_ta; /* SCSI Pri & Task attribute */ + desc->u.icmnd_16._resvd1 = 0; /* reserved: should be 0 */ + desc->u.icmnd_16.flags = flags; /* command flags */ + memcpy(desc->u.icmnd_16.scsi_cdb, scsi_cdb, CDB_16); /* SCSI CDB */ + desc->u.icmnd_16.data_len = data_len; /* length of data expected */ + memcpy(desc->u.icmnd_16.lun, lun, LUN_ADDRESS); /* LUN address */ + desc->u.icmnd_16._resvd2 = 0; /* reserved */ + hton24(desc->u.icmnd_16.d_id, d_id); /* FC vNIC only: Target D_ID */ + desc->u.icmnd_16.mss = mss; /* FC vNIC only: max burst */ + desc->u.icmnd_16.r_a_tov = ratov; /*FC vNIC only: Res. 
Alloc Timeout */ + desc->u.icmnd_16.e_d_tov = edtov; /*FC vNIC only: Err Detect Timeout */ + + vnic_wq_copy_post(wq); +} + +static inline void fnic_queue_wq_copy_desc_itmf(struct vnic_wq_copy *wq, + u32 req_id, u32 lunmap_id, + u32 tm_req, u32 tm_id, u8 *lun, + u32 d_id, u32 r_a_tov, + u32 e_d_tov) +{ + struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq); + + desc->hdr.type = FCPIO_ITMF; /* enum fcpio_type */ + desc->hdr.status = 0; /* header status entry */ + desc->hdr._resvd = 0; /* reserved */ + desc->hdr.tag.u.req_id = req_id; /* id for this request */ + + desc->u.itmf.lunmap_id = lunmap_id; /* index into lunmap table */ + desc->u.itmf.tm_req = tm_req; /* SCSI Task Management request */ + desc->u.itmf.t_tag = tm_id; /* tag of fcpio to be aborted */ + desc->u.itmf._resvd = 0; + memcpy(desc->u.itmf.lun, lun, LUN_ADDRESS); /* LUN address */ + desc->u.itmf._resvd1 = 0; + hton24(desc->u.itmf.d_id, d_id); /* FC vNIC only: Target D_ID */ + desc->u.itmf.r_a_tov = r_a_tov; /* FC vNIC only: R_A_TOV in msec */ + desc->u.itmf.e_d_tov = e_d_tov; /* FC vNIC only: E_D_TOV in msec */ + + vnic_wq_copy_post(wq); +} + +static inline void fnic_queue_wq_copy_desc_flogi_reg(struct vnic_wq_copy *wq, + u32 req_id, u8 format, + u32 s_id, u8 *gw_mac) +{ + struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq); + + desc->hdr.type = FCPIO_FLOGI_REG; /* enum fcpio_type */ + desc->hdr.status = 0; /* header status entry */ + desc->hdr._resvd = 0; /* reserved */ + desc->hdr.tag.u.req_id = req_id; /* id for this request */ + + desc->u.flogi_reg.format = format; + hton24(desc->u.flogi_reg.s_id, s_id); + memcpy(desc->u.flogi_reg.gateway_mac, gw_mac, ETH_ALEN); + + vnic_wq_copy_post(wq); +} + +static inline void fnic_queue_wq_copy_desc_fw_reset(struct vnic_wq_copy *wq, + u32 req_id) +{ + struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq); + + desc->hdr.type = FCPIO_RESET; /* enum fcpio_type */ + desc->hdr.status = 0; /* header status entry */ + desc->hdr._resvd = 0; /* reserved */ + desc->hdr.tag.u.req_id = req_id; /* id for this request */ + + vnic_wq_copy_post(wq); +} + +static inline void fnic_queue_wq_copy_desc_lunmap(struct vnic_wq_copy *wq, + u32 req_id, u64 lunmap_addr, + u32 lunmap_len) +{ + struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq); + + desc->hdr.type = FCPIO_LUNMAP_REQ; /* enum fcpio_type */ + desc->hdr.status = 0; /* header status entry */ + desc->hdr._resvd = 0; /* reserved */ + desc->hdr.tag.u.req_id = req_id; /* id for this request */ + + desc->u.lunmap_req.addr = lunmap_addr; /* address of the buffer */ + desc->u.lunmap_req.len = lunmap_len; /* len of the buffer */ + + vnic_wq_copy_post(wq); +} + +static inline void fnic_queue_rq_desc(struct vnic_rq *rq, + void *os_buf, dma_addr_t dma_addr, + u16 len) +{ + struct rq_enet_desc *desc = vnic_rq_next_desc(rq); + + rq_enet_desc_enc(desc, + (u64)dma_addr | VNIC_PADDR_TARGET, + RQ_ENET_TYPE_ONLY_SOP, + (u16)len); + + vnic_rq_post(rq, os_buf, 0, dma_addr, len); +} + + +struct fnic; + +int fnic_get_vnic_config(struct fnic *); +int fnic_alloc_vnic_resources(struct fnic *); +void fnic_free_vnic_resources(struct fnic *); +void fnic_get_res_counts(struct fnic *); +int fnic_set_nic_config(struct fnic *fnic, u8 rss_default_cpu, + u8 rss_hash_type, u8 rss_hash_bits, u8 rss_base_cpu, + u8 rss_enable, u8 tso_ipid_split_en, + u8 ig_vlan_strip_en); + +#endif /* _FNIC_RES_H_ */ diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c new file mode 100644 index 00000000000..eabf3650285 --- /dev/null +++ 
b/drivers/scsi/fnic/fnic_scsi.c @@ -0,0 +1,1850 @@ +/* + * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "fnic_io.h" +#include "fnic.h" + +const char *fnic_state_str[] = { + [FNIC_IN_FC_MODE] = "FNIC_IN_FC_MODE", + [FNIC_IN_FC_TRANS_ETH_MODE] = "FNIC_IN_FC_TRANS_ETH_MODE", + [FNIC_IN_ETH_MODE] = "FNIC_IN_ETH_MODE", + [FNIC_IN_ETH_TRANS_FC_MODE] = "FNIC_IN_ETH_TRANS_FC_MODE", +}; + +static const char *fnic_ioreq_state_str[] = { + [FNIC_IOREQ_CMD_PENDING] = "FNIC_IOREQ_CMD_PENDING", + [FNIC_IOREQ_ABTS_PENDING] = "FNIC_IOREQ_ABTS_PENDING", + [FNIC_IOREQ_ABTS_COMPLETE] = "FNIC_IOREQ_ABTS_COMPLETE", + [FNIC_IOREQ_CMD_COMPLETE] = "FNIC_IOREQ_CMD_COMPLETE", +}; + +static const char *fcpio_status_str[] = { + [FCPIO_SUCCESS] = "FCPIO_SUCCESS", /*0x0*/ + [FCPIO_INVALID_HEADER] = "FCPIO_INVALID_HEADER", + [FCPIO_OUT_OF_RESOURCE] = "FCPIO_OUT_OF_RESOURCE", + [FCPIO_INVALID_PARAM] = "FCPIO_INVALID_PARAM]", + [FCPIO_REQ_NOT_SUPPORTED] = "FCPIO_REQ_NOT_SUPPORTED", + [FCPIO_IO_NOT_FOUND] = "FCPIO_IO_NOT_FOUND", + [FCPIO_ABORTED] = "FCPIO_ABORTED", /*0x41*/ + [FCPIO_TIMEOUT] = "FCPIO_TIMEOUT", + [FCPIO_SGL_INVALID] = "FCPIO_SGL_INVALID", + [FCPIO_MSS_INVALID] = "FCPIO_MSS_INVALID", + [FCPIO_DATA_CNT_MISMATCH] = "FCPIO_DATA_CNT_MISMATCH", + [FCPIO_FW_ERR] = "FCPIO_FW_ERR", + [FCPIO_ITMF_REJECTED] = "FCPIO_ITMF_REJECTED", + [FCPIO_ITMF_FAILED] = "FCPIO_ITMF_FAILED", + [FCPIO_ITMF_INCORRECT_LUN] = "FCPIO_ITMF_INCORRECT_LUN", + [FCPIO_CMND_REJECTED] = "FCPIO_CMND_REJECTED", + [FCPIO_NO_PATH_AVAIL] = "FCPIO_NO_PATH_AVAIL", + [FCPIO_PATH_FAILED] = "FCPIO_PATH_FAILED", + [FCPIO_LUNMAP_CHNG_PEND] = "FCPIO_LUNHMAP_CHNG_PEND", +}; + +const char *fnic_state_to_str(unsigned int state) +{ + if (state >= ARRAY_SIZE(fnic_state_str) || !fnic_state_str[state]) + return "unknown"; + + return fnic_state_str[state]; +} + +static const char *fnic_ioreq_state_to_str(unsigned int state) +{ + if (state >= ARRAY_SIZE(fnic_ioreq_state_str) || + !fnic_ioreq_state_str[state]) + return "unknown"; + + return fnic_ioreq_state_str[state]; +} + +static const char *fnic_fcpio_status_to_str(unsigned int status) +{ + if (status >= ARRAY_SIZE(fcpio_status_str) || !fcpio_status_str[status]) + return "unknown"; + + return fcpio_status_str[status]; +} + +static void fnic_cleanup_io(struct fnic *fnic, int exclude_id); + +static inline spinlock_t *fnic_io_lock_hash(struct fnic *fnic, + struct scsi_cmnd *sc) +{ + u32 hash = sc->request->tag & (FNIC_IO_LOCKS - 1); + + return &fnic->io_req_lock[hash]; +} + +/* + * Unmap the data buffer and sense buffer for an io_req, + * also 
unmap and free the device-private scatter/gather list. + */ +static void fnic_release_ioreq_buf(struct fnic *fnic, + struct fnic_io_req *io_req, + struct scsi_cmnd *sc) +{ + if (io_req->sgl_list_pa) + pci_unmap_single(fnic->pdev, io_req->sgl_list_pa, + sizeof(io_req->sgl_list[0]) * io_req->sgl_cnt, + PCI_DMA_TODEVICE); + scsi_dma_unmap(sc); + + if (io_req->sgl_cnt) + mempool_free(io_req->sgl_list_alloc, + fnic->io_sgl_pool[io_req->sgl_type]); + if (io_req->sense_buf_pa) + pci_unmap_single(fnic->pdev, io_req->sense_buf_pa, + SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE); +} + +/* Free up Copy Wq descriptors. Called with copy_wq lock held */ +static int free_wq_copy_descs(struct fnic *fnic, struct vnic_wq_copy *wq) +{ + /* if no Ack received from firmware, then nothing to clean */ + if (!fnic->fw_ack_recd[0]) + return 1; + + /* + * Update desc_available count based on number of freed descriptors + * Account for wraparound + */ + if (wq->to_clean_index <= fnic->fw_ack_index[0]) + wq->ring.desc_avail += (fnic->fw_ack_index[0] + - wq->to_clean_index + 1); + else + wq->ring.desc_avail += (wq->ring.desc_count + - wq->to_clean_index + + fnic->fw_ack_index[0] + 1); + + /* + * just bump clean index to ack_index+1 accounting for wraparound + * this will essentially free up all descriptors between + * to_clean_index and fw_ack_index, both inclusive + */ + wq->to_clean_index = + (fnic->fw_ack_index[0] + 1) % wq->ring.desc_count; + + /* we have processed the acks received so far */ + fnic->fw_ack_recd[0] = 0; + return 0; +} + + +/* + * fnic_fw_reset_handler + * Routine to send reset msg to fw + */ +int fnic_fw_reset_handler(struct fnic *fnic) +{ + struct vnic_wq_copy *wq = &fnic->wq_copy[0]; + int ret = 0; + unsigned long flags; + + spin_lock_irqsave(&fnic->wq_copy_lock[0], flags); + + if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0]) + free_wq_copy_descs(fnic, wq); + + if (!vnic_wq_copy_desc_avail(wq)) + ret = -EAGAIN; + else + fnic_queue_wq_copy_desc_fw_reset(wq, SCSI_NO_TAG); + + spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags); + + if (!ret) + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + "Issued fw reset\n"); + else + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + "Failed to issue fw reset\n"); + return ret; +} + + +/* + * fnic_flogi_reg_handler + * Routine to send flogi register msg to fw + */ +int fnic_flogi_reg_handler(struct fnic *fnic) +{ + struct vnic_wq_copy *wq = &fnic->wq_copy[0]; + u8 gw_mac[ETH_ALEN]; + int ret = 0; + unsigned long flags; + + spin_lock_irqsave(&fnic->wq_copy_lock[0], flags); + + if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0]) + free_wq_copy_descs(fnic, wq); + + if (!vnic_wq_copy_desc_avail(wq)) { + ret = -EAGAIN; + goto flogi_reg_ioreq_end; + } + + if (fnic->fcoui_mode) + memset(gw_mac, 0xff, ETH_ALEN); + else + memcpy(gw_mac, fnic->dest_addr, ETH_ALEN); + + fnic_queue_wq_copy_desc_flogi_reg(wq, SCSI_NO_TAG, + FCPIO_FLOGI_REG_GW_DEST, + fnic->s_id, + gw_mac); + +flogi_reg_ioreq_end: + spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags); + + if (!ret) + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + "flog reg issued\n"); + + return ret; +} + +/* + * fnic_queue_wq_copy_desc + * Routine to enqueue a wq copy desc + */ +static inline int fnic_queue_wq_copy_desc(struct fnic *fnic, + struct vnic_wq_copy *wq, + struct fnic_io_req *io_req, + struct scsi_cmnd *sc, + u32 sg_count) +{ + struct scatterlist *sg; + struct fc_rport *rport = starget_to_rport(scsi_target(sc->device)); + struct fc_rport_libfc_priv *rp = rport->dd_data; + struct host_sg_desc 
*desc; + u8 pri_tag = 0; + unsigned int i; + unsigned long intr_flags; + int flags; + u8 exch_flags; + struct scsi_lun fc_lun; + char msg[2]; + + if (sg_count) { + BUG_ON(sg_count < 0); + BUG_ON(sg_count > FNIC_MAX_SG_DESC_CNT); + + /* For each SGE, create a device desc entry */ + desc = io_req->sgl_list; + for_each_sg(scsi_sglist(sc), sg, sg_count, i) { + desc->addr = cpu_to_le64(sg_dma_address(sg)); + desc->len = cpu_to_le32(sg_dma_len(sg)); + desc->_resvd = 0; + desc++; + } + + io_req->sgl_list_pa = pci_map_single + (fnic->pdev, + io_req->sgl_list, + sizeof(io_req->sgl_list[0]) * sg_count, + PCI_DMA_TODEVICE); + } + + io_req->sense_buf_pa = pci_map_single(fnic->pdev, + sc->sense_buffer, + SCSI_SENSE_BUFFERSIZE, + PCI_DMA_FROMDEVICE); + + int_to_scsilun(sc->device->lun, &fc_lun); + + pri_tag = FCPIO_ICMND_PTA_SIMPLE; + msg[0] = MSG_SIMPLE_TAG; + scsi_populate_tag_msg(sc, msg); + if (msg[0] == MSG_ORDERED_TAG) + pri_tag = FCPIO_ICMND_PTA_ORDERED; + + /* Enqueue the descriptor in the Copy WQ */ + spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags); + + if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0]) + free_wq_copy_descs(fnic, wq); + + if (unlikely(!vnic_wq_copy_desc_avail(wq))) { + spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags); + return SCSI_MLQUEUE_HOST_BUSY; + } + + flags = 0; + if (sc->sc_data_direction == DMA_FROM_DEVICE) + flags = FCPIO_ICMND_RDDATA; + else if (sc->sc_data_direction == DMA_TO_DEVICE) + flags = FCPIO_ICMND_WRDATA; + + exch_flags = 0; + if ((fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR) && + (rp->flags & FC_RP_FLAGS_RETRY)) + exch_flags |= FCPIO_ICMND_SRFLAG_RETRY; + + fnic_queue_wq_copy_desc_icmnd_16(wq, sc->request->tag, + 0, exch_flags, io_req->sgl_cnt, + SCSI_SENSE_BUFFERSIZE, + io_req->sgl_list_pa, + io_req->sense_buf_pa, + 0, /* scsi cmd ref, always 0 */ + pri_tag, /* scsi pri and tag */ + flags, /* command flags */ + sc->cmnd, scsi_bufflen(sc), + fc_lun.scsi_lun, io_req->port_id, + rport->maxframe_size, rp->r_a_tov, + rp->e_d_tov); + + spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags); + return 0; +} + +/* + * fnic_queuecommand + * Routine to send a scsi cdb + * Called with host_lock held and interrupts disabled. + */ +int fnic_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) +{ + struct fc_lport *lp; + struct fc_rport *rport; + struct fnic_io_req *io_req; + struct fnic *fnic; + struct vnic_wq_copy *wq; + int ret; + u32 sg_count; + unsigned long flags; + unsigned long ptr; + + rport = starget_to_rport(scsi_target(sc->device)); + ret = fc_remote_port_chkready(rport); + if (ret) { + sc->result = ret; + done(sc); + return 0; + } + + lp = shost_priv(sc->device->host); + if (lp->state != LPORT_ST_READY || !(lp->link_up)) + return SCSI_MLQUEUE_HOST_BUSY; + + /* + * Release host lock, use driver resource specific locks from here. + * Don't re-enable interrupts in case they were disabled prior to the + * caller disabling them. 
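A minimal user-space model of the descriptor-reclaim arithmetic in free_wq_copy_descs() above; the ring size and indexes are made-up values chosen to show both the straight case and the wrapped case:

/* Standalone model of the reclaim count; not driver code. */
#include <stdio.h>

/* Descriptors made available when the firmware has acked up to ack_index
 * (inclusive) and cleaning last stopped at to_clean. */
static unsigned int descs_freed(unsigned int desc_count,
				unsigned int to_clean,
				unsigned int ack_index)
{
	if (to_clean <= ack_index)
		return ack_index - to_clean + 1;          /* no wrap */
	return desc_count - to_clean + ack_index + 1;     /* wrapped past the end */
}

int main(void)
{
	unsigned int desc_count = 64;

	/* straight case: entries 10..20 are reclaimed -> 11 descriptors */
	printf("%u\n", descs_freed(desc_count, 10, 20));
	/* wrap case: entries 60..63 and 0..5 are reclaimed -> 10 descriptors */
	printf("%u\n", descs_freed(desc_count, 60, 5));
	/* the next to_clean index then becomes (ack_index + 1) % desc_count */
	printf("next to_clean = %u\n", (5 + 1) % desc_count);
	return 0;
}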
+ */ + spin_unlock(lp->host->host_lock); + + /* Get a new io_req for this SCSI IO */ + fnic = lport_priv(lp); + + io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC); + if (!io_req) { + ret = SCSI_MLQUEUE_HOST_BUSY; + goto out; + } + memset(io_req, 0, sizeof(*io_req)); + + /* Map the data buffer */ + sg_count = scsi_dma_map(sc); + if (sg_count < 0) { + mempool_free(io_req, fnic->io_req_pool); + goto out; + } + + /* Determine the type of scatter/gather list we need */ + io_req->sgl_cnt = sg_count; + io_req->sgl_type = FNIC_SGL_CACHE_DFLT; + if (sg_count > FNIC_DFLT_SG_DESC_CNT) + io_req->sgl_type = FNIC_SGL_CACHE_MAX; + + if (sg_count) { + io_req->sgl_list = + mempool_alloc(fnic->io_sgl_pool[io_req->sgl_type], + GFP_ATOMIC | GFP_DMA); + if (!io_req->sgl_list) { + ret = SCSI_MLQUEUE_HOST_BUSY; + scsi_dma_unmap(sc); + mempool_free(io_req, fnic->io_req_pool); + goto out; + } + + /* Cache sgl list allocated address before alignment */ + io_req->sgl_list_alloc = io_req->sgl_list; + ptr = (unsigned long) io_req->sgl_list; + if (ptr % FNIC_SG_DESC_ALIGN) { + io_req->sgl_list = (struct host_sg_desc *) + (((unsigned long) ptr + + FNIC_SG_DESC_ALIGN - 1) + & ~(FNIC_SG_DESC_ALIGN - 1)); + } + } + + /* initialize rest of io_req */ + io_req->port_id = rport->port_id; + CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING; + CMD_SP(sc) = (char *)io_req; + sc->scsi_done = done; + + /* create copy wq desc and enqueue it */ + wq = &fnic->wq_copy[0]; + ret = fnic_queue_wq_copy_desc(fnic, wq, io_req, sc, sg_count); + if (ret) { + /* + * In case another thread cancelled the request, + * refetch the pointer under the lock. + */ + spinlock_t *io_lock = fnic_io_lock_hash(fnic, sc); + + spin_lock_irqsave(io_lock, flags); + io_req = (struct fnic_io_req *)CMD_SP(sc); + CMD_SP(sc) = NULL; + CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE; + spin_unlock_irqrestore(io_lock, flags); + if (io_req) { + fnic_release_ioreq_buf(fnic, io_req, sc); + mempool_free(io_req, fnic->io_req_pool); + } + } +out: + /* acquire host lock before returning to SCSI */ + spin_lock(lp->host->host_lock); + return ret; +} + +/* + * fnic_fcpio_fw_reset_cmpl_handler + * Routine to handle fw reset completion + */ +static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic, + struct fcpio_fw_req *desc) +{ + u8 type; + u8 hdr_status; + struct fcpio_tag tag; + int ret = 0; + struct fc_frame *flogi; + unsigned long flags; + + fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag); + + /* Clean up all outstanding io requests */ + fnic_cleanup_io(fnic, SCSI_NO_TAG); + + spin_lock_irqsave(&fnic->fnic_lock, flags); + + flogi = fnic->flogi; + fnic->flogi = NULL; + + /* fnic should be in FC_TRANS_ETH_MODE */ + if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) { + /* Check status of reset completion */ + if (!hdr_status) { + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + "reset cmpl success\n"); + /* Ready to send flogi out */ + fnic->state = FNIC_IN_ETH_MODE; + } else { + FNIC_SCSI_DBG(KERN_DEBUG, + fnic->lport->host, + "fnic fw_reset : failed %s\n", + fnic_fcpio_status_to_str(hdr_status)); + + /* + * Unable to change to eth mode, cannot send out flogi + * Change state to fc mode, so that subsequent Flogi + * requests from libFC will cause more attempts to + * reset the firmware. 
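A minimal user-space model of the scatter-gather list alignment done in fnic_queuecommand() above; the 16-byte alignment is only an assumed example standing in for FNIC_SG_DESC_ALIGN, which is defined in the driver headers rather than shown here:

/* Standalone model of the power-of-two round-up; not driver code. */
#include <stdio.h>
#include <stdint.h>

#define SG_DESC_ALIGN 16UL	/* example value only */

static uintptr_t align_up(uintptr_t ptr, uintptr_t align)
{
	/* Requires align to be a power of two, as the driver's mask trick does. */
	return (ptr + align - 1) & ~(align - 1);
}

int main(void)
{
	uintptr_t raw = 0x1009;	/* pretend the mempool returned this address */

	/* The unaligned pointer is kept separately so it can be handed back to
	 * the allocator later, exactly like sgl_list_alloc in the driver. */
	printf("raw %#lx -> aligned %#lx\n",
	       (unsigned long)raw, (unsigned long)align_up(raw, SG_DESC_ALIGN));
	return 0;
}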
Free the cached flogi + */ + fnic->state = FNIC_IN_FC_MODE; + ret = -1; + } + } else { + FNIC_SCSI_DBG(KERN_DEBUG, + fnic->lport->host, + "Unexpected state %s while processing" + " reset cmpl\n", fnic_state_to_str(fnic->state)); + ret = -1; + } + + /* Thread removing device blocks till firmware reset is complete */ + if (fnic->remove_wait) + complete(fnic->remove_wait); + + /* + * If fnic is being removed, or fw reset failed + * free the flogi frame. Else, send it out + */ + if (fnic->remove_wait || ret) { + fnic->flogi_oxid = FC_XID_UNKNOWN; + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + if (flogi) + dev_kfree_skb_irq(fp_skb(flogi)); + goto reset_cmpl_handler_end; + } + + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + if (flogi) + ret = fnic_send_frame(fnic, flogi); + + reset_cmpl_handler_end: + return ret; +} + +/* + * fnic_fcpio_flogi_reg_cmpl_handler + * Routine to handle flogi register completion + */ +static int fnic_fcpio_flogi_reg_cmpl_handler(struct fnic *fnic, + struct fcpio_fw_req *desc) +{ + u8 type; + u8 hdr_status; + struct fcpio_tag tag; + int ret = 0; + struct fc_frame *flogi_resp = NULL; + unsigned long flags; + struct sk_buff *skb; + + fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag); + + /* Update fnic state based on status of flogi reg completion */ + spin_lock_irqsave(&fnic->fnic_lock, flags); + + flogi_resp = fnic->flogi_resp; + fnic->flogi_resp = NULL; + + if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE) { + + /* Check flogi registration completion status */ + if (!hdr_status) { + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + "flog reg succeeded\n"); + fnic->state = FNIC_IN_FC_MODE; + } else { + FNIC_SCSI_DBG(KERN_DEBUG, + fnic->lport->host, + "fnic flogi reg :failed %s\n", + fnic_fcpio_status_to_str(hdr_status)); + fnic->state = FNIC_IN_ETH_MODE; + ret = -1; + } + } else { + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + "Unexpected fnic state %s while" + " processing flogi reg completion\n", + fnic_state_to_str(fnic->state)); + ret = -1; + } + + /* Successful flogi reg cmpl, pass frame to LibFC */ + if (!ret && flogi_resp) { + if (fnic->stop_rx_link_events) { + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + goto reg_cmpl_handler_end; + } + skb = (struct sk_buff *)flogi_resp; + /* Use fr_flags to indicate whether flogi resp or not */ + fr_flags(flogi_resp) = 1; + fr_dev(flogi_resp) = fnic->lport; + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + skb_queue_tail(&fnic->frame_queue, skb); + queue_work(fnic_event_queue, &fnic->frame_work); + + } else { + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + if (flogi_resp) + dev_kfree_skb_irq(fp_skb(flogi_resp)); + } + +reg_cmpl_handler_end: + return ret; +} + +static inline int is_ack_index_in_range(struct vnic_wq_copy *wq, + u16 request_out) +{ + if (wq->to_clean_index <= wq->to_use_index) { + /* out of range, stale request_out index */ + if (request_out < wq->to_clean_index || + request_out >= wq->to_use_index) + return 0; + } else { + /* out of range, stale request_out index */ + if (request_out < wq->to_clean_index && + request_out >= wq->to_use_index) + return 0; + } + /* request_out index is in range */ + return 1; +} + + +/* + * Mark that ack received and store the Ack index. If there are multiple + * acks received before Tx thread cleans it up, the latest value will be + * used which is correct behavior. 
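A minimal user-space model of the window test in is_ack_index_in_range() above: a firmware-acked index is trusted only if it lies in the in-flight window [to_clean, to_use), which may wrap past the end of the ring. Ring size and indexes below are arbitrary examples:

/* Standalone model of the circular window membership test; not driver code. */
#include <stdio.h>

static int index_in_window(unsigned int to_clean, unsigned int to_use,
			   unsigned int idx)
{
	if (to_clean <= to_use)		/* window does not wrap */
		return idx >= to_clean && idx < to_use;
	/* window wraps: valid indexes are [to_clean, end) plus [0, to_use) */
	return idx >= to_clean || idx < to_use;
}

int main(void)
{
	/* non-wrapping window [4, 10): 7 is live, 12 is stale */
	printf("%d %d\n", index_in_window(4, 10, 7), index_in_window(4, 10, 12));
	/* wrapping window [60, 3) on a 64-entry ring: 61 and 1 are live, 30 is stale */
	printf("%d %d %d\n", index_in_window(60, 3, 61),
	       index_in_window(60, 3, 1), index_in_window(60, 3, 30));
	return 0;
}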
This state should be in the copy Wq + * instead of in the fnic + */ +static inline void fnic_fcpio_ack_handler(struct fnic *fnic, + unsigned int cq_index, + struct fcpio_fw_req *desc) +{ + struct vnic_wq_copy *wq; + u16 request_out = desc->u.ack.request_out; + unsigned long flags; + + /* mark the ack state */ + wq = &fnic->wq_copy[cq_index - fnic->raw_wq_count - fnic->rq_count]; + spin_lock_irqsave(&fnic->wq_copy_lock[0], flags); + + if (is_ack_index_in_range(wq, request_out)) { + fnic->fw_ack_index[0] = request_out; + fnic->fw_ack_recd[0] = 1; + } + spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags); +} + +/* + * fnic_fcpio_icmnd_cmpl_handler + * Routine to handle icmnd completions + */ +static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic, + struct fcpio_fw_req *desc) +{ + u8 type; + u8 hdr_status; + struct fcpio_tag tag; + u32 id; + u64 xfer_len = 0; + struct fcpio_icmnd_cmpl *icmnd_cmpl; + struct fnic_io_req *io_req; + struct scsi_cmnd *sc; + unsigned long flags; + spinlock_t *io_lock; + + /* Decode the cmpl description to get the io_req id */ + fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag); + fcpio_tag_id_dec(&tag, &id); + + if (id >= FNIC_MAX_IO_REQ) + return; + + sc = scsi_host_find_tag(fnic->lport->host, id); + WARN_ON_ONCE(!sc); + if (!sc) + return; + + io_lock = fnic_io_lock_hash(fnic, sc); + spin_lock_irqsave(io_lock, flags); + io_req = (struct fnic_io_req *)CMD_SP(sc); + WARN_ON_ONCE(!io_req); + if (!io_req) { + spin_unlock_irqrestore(io_lock, flags); + return; + } + + /* firmware completed the io */ + io_req->io_completed = 1; + + /* + * if SCSI-ML has already issued abort on this command, + * ignore completion of the IO. The abts path will clean it up + */ + if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) { + spin_unlock_irqrestore(io_lock, flags); + return; + } + + /* Mark the IO as complete */ + CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE; + + icmnd_cmpl = &desc->u.icmnd_cmpl; + + switch (hdr_status) { + case FCPIO_SUCCESS: + sc->result = (DID_OK << 16) | icmnd_cmpl->scsi_status; + xfer_len = scsi_bufflen(sc); + scsi_set_resid(sc, icmnd_cmpl->residual); + + if (icmnd_cmpl->flags & FCPIO_ICMND_CMPL_RESID_UNDER) + xfer_len -= icmnd_cmpl->residual; + + /* + * If queue_full, then try to reduce queue depth for all + * LUNS on the target. Todo: this should be accompanied + * by a periodic queue_depth rampup based on successful + * IO completion. + */ + if (icmnd_cmpl->scsi_status == QUEUE_FULL) { + struct scsi_device *t_sdev; + int qd = 0; + + shost_for_each_device(t_sdev, sc->device->host) { + if (t_sdev->id != sc->device->id) + continue; + + if (t_sdev->queue_depth > 1) { + qd = scsi_track_queue_full + (t_sdev, + t_sdev->queue_depth - 1); + if (qd == -1) + qd = t_sdev->host->cmd_per_lun; + shost_printk(KERN_INFO, + fnic->lport->host, + "scsi[%d:%d:%d:%d" + "] queue full detected," + "new depth = %d\n", + t_sdev->host->host_no, + t_sdev->channel, + t_sdev->id, t_sdev->lun, + t_sdev->queue_depth); + } + } + } + break; + + case FCPIO_TIMEOUT: /* request was timed out */ + sc->result = (DID_TIME_OUT << 16) | icmnd_cmpl->scsi_status; + break; + + case FCPIO_ABORTED: /* request was aborted */ + sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status; + break; + + case FCPIO_DATA_CNT_MISMATCH: /* recv/sent more/less data than exp. 
*/ + scsi_set_resid(sc, icmnd_cmpl->residual); + sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status; + break; + + case FCPIO_OUT_OF_RESOURCE: /* out of resources to complete request */ + sc->result = (DID_REQUEUE << 16) | icmnd_cmpl->scsi_status; + break; + case FCPIO_INVALID_HEADER: /* header contains invalid data */ + case FCPIO_INVALID_PARAM: /* some parameter in request invalid */ + case FCPIO_REQ_NOT_SUPPORTED:/* request type is not supported */ + case FCPIO_IO_NOT_FOUND: /* requested I/O was not found */ + case FCPIO_SGL_INVALID: /* request was aborted due to sgl error */ + case FCPIO_MSS_INVALID: /* request was aborted due to mss error */ + case FCPIO_FW_ERR: /* request was terminated due fw error */ + default: + shost_printk(KERN_ERR, fnic->lport->host, "hdr status = %s\n", + fnic_fcpio_status_to_str(hdr_status)); + sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status; + break; + } + + /* Break link with the SCSI command */ + CMD_SP(sc) = NULL; + + spin_unlock_irqrestore(io_lock, flags); + + fnic_release_ioreq_buf(fnic, io_req, sc); + + mempool_free(io_req, fnic->io_req_pool); + + if (sc->sc_data_direction == DMA_FROM_DEVICE) { + fnic->lport->host_stats.fcp_input_requests++; + fnic->fcp_input_bytes += xfer_len; + } else if (sc->sc_data_direction == DMA_TO_DEVICE) { + fnic->lport->host_stats.fcp_output_requests++; + fnic->fcp_output_bytes += xfer_len; + } else + fnic->lport->host_stats.fcp_control_requests++; + + /* Call SCSI completion function to complete the IO */ + if (sc->scsi_done) + sc->scsi_done(sc); + +} + +/* fnic_fcpio_itmf_cmpl_handler + * Routine to handle itmf completions + */ +static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, + struct fcpio_fw_req *desc) +{ + u8 type; + u8 hdr_status; + struct fcpio_tag tag; + u32 id; + struct scsi_cmnd *sc; + struct fnic_io_req *io_req; + unsigned long flags; + spinlock_t *io_lock; + + fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag); + fcpio_tag_id_dec(&tag, &id); + + if ((id & FNIC_TAG_MASK) >= FNIC_MAX_IO_REQ) + return; + + sc = scsi_host_find_tag(fnic->lport->host, id & FNIC_TAG_MASK); + WARN_ON_ONCE(!sc); + if (!sc) + return; + + io_lock = fnic_io_lock_hash(fnic, sc); + spin_lock_irqsave(io_lock, flags); + io_req = (struct fnic_io_req *)CMD_SP(sc); + WARN_ON_ONCE(!io_req); + if (!io_req) { + spin_unlock_irqrestore(io_lock, flags); + return; + } + + if (id & FNIC_TAG_ABORT) { + /* Completion of abort cmd */ + if (CMD_STATE(sc) != FNIC_IOREQ_ABTS_PENDING) { + /* This is a late completion. Ignore it */ + spin_unlock_irqrestore(io_lock, flags); + return; + } + CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE; + CMD_ABTS_STATUS(sc) = hdr_status; + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + "abts cmpl recd. id %d status %s\n", + (int)(id & FNIC_TAG_MASK), + fnic_fcpio_status_to_str(hdr_status)); + + /* + * If scsi_eh thread is blocked waiting for abts to complete, + * signal completion to it. 
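A minimal user-space model of the residual handling in the FCPIO_SUCCESS branch above: the transferred byte count starts at the full buffer length and is reduced by the firmware-reported residual only when the under-run flag is set. The flag bit value below is a placeholder for FCPIO_ICMND_CMPL_RESID_UNDER, which is defined in fcpio.h:

/* Standalone model of the under-run accounting; not driver code. */
#include <stdio.h>
#include <stdint.h>

#define RESID_UNDER 0x1u	/* placeholder flag bit */

static uint64_t bytes_transferred(uint64_t bufflen, uint32_t residual,
				  uint32_t flags)
{
	uint64_t xfer_len = bufflen;

	if (flags & RESID_UNDER)	/* firmware moved less data than requested */
		xfer_len -= residual;
	return xfer_len;		/* this is what feeds the fcp byte counters */
}

int main(void)
{
	/* full 4 KiB transfer, then the same transfer with a 512-byte under-run */
	printf("%llu\n", (unsigned long long)bytes_transferred(4096, 0, 0));
	printf("%llu\n", (unsigned long long)bytes_transferred(4096, 512, RESID_UNDER));
	return 0;
}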
IO will be cleaned in the thread + * else clean it in this context + */ + if (io_req->abts_done) { + complete(io_req->abts_done); + spin_unlock_irqrestore(io_lock, flags); + } else { + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + "abts cmpl, completing IO\n"); + CMD_SP(sc) = NULL; + sc->result = (DID_ERROR << 16); + + spin_unlock_irqrestore(io_lock, flags); + + fnic_release_ioreq_buf(fnic, io_req, sc); + mempool_free(io_req, fnic->io_req_pool); + if (sc->scsi_done) + sc->scsi_done(sc); + } + + } else if (id & FNIC_TAG_DEV_RST) { + /* Completion of device reset */ + CMD_LR_STATUS(sc) = hdr_status; + CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE; + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + "dev reset cmpl recd. id %d status %s\n", + (int)(id & FNIC_TAG_MASK), + fnic_fcpio_status_to_str(hdr_status)); + if (io_req->dr_done) + complete(io_req->dr_done); + spin_unlock_irqrestore(io_lock, flags); + + } else { + shost_printk(KERN_ERR, fnic->lport->host, + "Unexpected itmf io state %s tag %x\n", + fnic_ioreq_state_to_str(CMD_STATE(sc)), id); + spin_unlock_irqrestore(io_lock, flags); + } + +} + +/* + * fnic_fcpio_cmpl_handler + * Routine to service the cq for wq_copy + */ +static int fnic_fcpio_cmpl_handler(struct vnic_dev *vdev, + unsigned int cq_index, + struct fcpio_fw_req *desc) +{ + struct fnic *fnic = vnic_dev_priv(vdev); + int ret = 0; + + switch (desc->hdr.type) { + case FCPIO_ACK: /* fw copied copy wq desc to its queue */ + fnic_fcpio_ack_handler(fnic, cq_index, desc); + break; + + case FCPIO_ICMND_CMPL: /* fw completed a command */ + fnic_fcpio_icmnd_cmpl_handler(fnic, desc); + break; + + case FCPIO_ITMF_CMPL: /* fw completed itmf (abort cmd, lun reset)*/ + fnic_fcpio_itmf_cmpl_handler(fnic, desc); + break; + + case FCPIO_FLOGI_REG_CMPL: /* fw completed flogi_reg */ + ret = fnic_fcpio_flogi_reg_cmpl_handler(fnic, desc); + break; + + case FCPIO_RESET_CMPL: /* fw completed reset */ + ret = fnic_fcpio_fw_reset_cmpl_handler(fnic, desc); + break; + + default: + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + "firmware completion type %d\n", + desc->hdr.type); + break; + } + + return ret; +} + +/* + * fnic_wq_copy_cmpl_handler + * Routine to process wq copy + */ +int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int copy_work_to_do) +{ + unsigned int wq_work_done = 0; + unsigned int i, cq_index; + unsigned int cur_work_done; + + for (i = 0; i < fnic->wq_copy_count; i++) { + cq_index = i + fnic->raw_wq_count + fnic->rq_count; + cur_work_done = vnic_cq_copy_service(&fnic->cq[cq_index], + fnic_fcpio_cmpl_handler, + copy_work_to_do); + wq_work_done += cur_work_done; + } + return wq_work_done; +} + +static void fnic_cleanup_io(struct fnic *fnic, int exclude_id) +{ + unsigned int i; + struct fnic_io_req *io_req; + unsigned long flags = 0; + struct scsi_cmnd *sc; + spinlock_t *io_lock; + + for (i = 0; i < FNIC_MAX_IO_REQ; i++) { + if (i == exclude_id) + continue; + + sc = scsi_host_find_tag(fnic->lport->host, i); + if (!sc) + continue; + + io_lock = fnic_io_lock_hash(fnic, sc); + spin_lock_irqsave(io_lock, flags); + io_req = (struct fnic_io_req *)CMD_SP(sc); + if (!io_req) { + spin_unlock_irqrestore(io_lock, flags); + goto cleanup_scsi_cmd; + } + + CMD_SP(sc) = NULL; + + spin_unlock_irqrestore(io_lock, flags); + + /* + * If there is a scsi_cmnd associated with this io_req, then + * free the corresponding state + */ + fnic_release_ioreq_buf(fnic, io_req, sc); + mempool_free(io_req, fnic->io_req_pool); + +cleanup_scsi_cmd: + sc->result = DID_TRANSPORT_DISRUPTED << 16; + FNIC_SCSI_DBG(KERN_DEBUG, 
fnic->lport->host, "fnic_cleanup_io:" + " DID_TRANSPORT_DISRUPTED\n"); + + /* Complete the command to SCSI */ + if (sc->scsi_done) + sc->scsi_done(sc); + } +} + +void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq, + struct fcpio_host_req *desc) +{ + u32 id; + struct fnic *fnic = vnic_dev_priv(wq->vdev); + struct fnic_io_req *io_req; + struct scsi_cmnd *sc; + unsigned long flags; + spinlock_t *io_lock; + + /* get the tag reference */ + fcpio_tag_id_dec(&desc->hdr.tag, &id); + id &= FNIC_TAG_MASK; + + if (id >= FNIC_MAX_IO_REQ) + return; + + sc = scsi_host_find_tag(fnic->lport->host, id); + if (!sc) + return; + + io_lock = fnic_io_lock_hash(fnic, sc); + spin_lock_irqsave(io_lock, flags); + + /* Get the IO context which this desc refers to */ + io_req = (struct fnic_io_req *)CMD_SP(sc); + + /* fnic interrupts are turned off by now */ + + if (!io_req) { + spin_unlock_irqrestore(io_lock, flags); + goto wq_copy_cleanup_scsi_cmd; + } + + CMD_SP(sc) = NULL; + + spin_unlock_irqrestore(io_lock, flags); + + fnic_release_ioreq_buf(fnic, io_req, sc); + mempool_free(io_req, fnic->io_req_pool); + +wq_copy_cleanup_scsi_cmd: + sc->result = DID_NO_CONNECT << 16; + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "wq_copy_cleanup_handler:" + " DID_NO_CONNECT\n"); + + if (sc->scsi_done) + sc->scsi_done(sc); +} + +static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag, + u32 task_req, u8 *fc_lun, + struct fnic_io_req *io_req) +{ + struct vnic_wq_copy *wq = &fnic->wq_copy[0]; + unsigned long flags; + + spin_lock_irqsave(&fnic->wq_copy_lock[0], flags); + + if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0]) + free_wq_copy_descs(fnic, wq); + + if (!vnic_wq_copy_desc_avail(wq)) { + spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags); + return 1; + } + fnic_queue_wq_copy_desc_itmf(wq, tag | FNIC_TAG_ABORT, + 0, task_req, tag, fc_lun, io_req->port_id, + fnic->config.ra_tov, fnic->config.ed_tov); + + spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags); + return 0; +} + +void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id) +{ + int tag; + struct fnic_io_req *io_req; + spinlock_t *io_lock; + unsigned long flags; + struct scsi_cmnd *sc; + struct scsi_lun fc_lun; + enum fnic_ioreq_state old_ioreq_state; + + FNIC_SCSI_DBG(KERN_DEBUG, + fnic->lport->host, + "fnic_rport_reset_exch called portid 0x%06x\n", + port_id); + + if (fnic->in_remove) + return; + + for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) { + sc = scsi_host_find_tag(fnic->lport->host, tag); + if (!sc) + continue; + + io_lock = fnic_io_lock_hash(fnic, sc); + spin_lock_irqsave(io_lock, flags); + + io_req = (struct fnic_io_req *)CMD_SP(sc); + + if (!io_req || io_req->port_id != port_id) { + spin_unlock_irqrestore(io_lock, flags); + continue; + } + + /* + * Found IO that is still pending with firmware and + * belongs to rport that went away + */ + if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) { + spin_unlock_irqrestore(io_lock, flags); + continue; + } + old_ioreq_state = CMD_STATE(sc); + CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING; + CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE; + + BUG_ON(io_req->abts_done); + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + "fnic_rport_reset_exch: Issuing abts\n"); + + spin_unlock_irqrestore(io_lock, flags); + + /* Now queue the abort command to firmware */ + int_to_scsilun(sc->device->lun, &fc_lun); + + if (fnic_queue_abort_io_req(fnic, tag, + FCPIO_ITMF_ABT_TASK_TERM, + fc_lun.scsi_lun, io_req)) { + /* + * Revert the cmd state back to old state, if + * it hasnt changed in between. 
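A minimal user-space model of the tag encoding used by the ITMF paths above, where the low bits carry the SCSI tag and separate flag bits mark an abort or a device reset. The bit positions chosen below are placeholders; the real FNIC_TAG_* masks are defined in the driver headers, not in this file:

/* Standalone model of tag | flag encoding and decoding; not driver code. */
#include <stdio.h>
#include <stdint.h>

#define TAG_BITS   8u                     /* placeholder tag width */
#define TAG_MASK   ((1u << TAG_BITS) - 1)
#define TAG_ABORT  (1u << TAG_BITS)       /* placeholder flag bits */
#define TAG_DEVRST (1u << (TAG_BITS + 1))

int main(void)
{
	uint32_t abort_id = 42u | TAG_ABORT;	/* as queued by the abort path */
	uint32_t reset_id = 7u | TAG_DEVRST;	/* as queued by the LUN reset path */

	printf("abort: tag=%u is_abort=%d is_dev_rst=%d\n",
	       abort_id & TAG_MASK, !!(abort_id & TAG_ABORT), !!(abort_id & TAG_DEVRST));
	printf("reset: tag=%u is_abort=%d is_dev_rst=%d\n",
	       reset_id & TAG_MASK, !!(reset_id & TAG_ABORT), !!(reset_id & TAG_DEVRST));
	return 0;
}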
This cmd will get + * aborted later by scsi_eh, or cleaned up during + * lun reset + */ + io_lock = fnic_io_lock_hash(fnic, sc); + + spin_lock_irqsave(io_lock, flags); + if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) + CMD_STATE(sc) = old_ioreq_state; + spin_unlock_irqrestore(io_lock, flags); + } + } + +} + +void fnic_terminate_rport_io(struct fc_rport *rport) +{ + int tag; + struct fnic_io_req *io_req; + spinlock_t *io_lock; + unsigned long flags; + struct scsi_cmnd *sc; + struct scsi_lun fc_lun; + struct fc_rport_libfc_priv *rdata = rport->dd_data; + struct fc_lport *lport = rdata->local_port; + struct fnic *fnic = lport_priv(lport); + struct fc_rport *cmd_rport; + enum fnic_ioreq_state old_ioreq_state; + + FNIC_SCSI_DBG(KERN_DEBUG, + fnic->lport->host, "fnic_terminate_rport_io called" + " wwpn 0x%llx, wwnn0x%llx, portid 0x%06x\n", + rport->port_name, rport->node_name, + rport->port_id); + + if (fnic->in_remove) + return; + + for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) { + sc = scsi_host_find_tag(fnic->lport->host, tag); + if (!sc) + continue; + + cmd_rport = starget_to_rport(scsi_target(sc->device)); + if (rport != cmd_rport) + continue; + + io_lock = fnic_io_lock_hash(fnic, sc); + spin_lock_irqsave(io_lock, flags); + + io_req = (struct fnic_io_req *)CMD_SP(sc); + + if (!io_req || rport != cmd_rport) { + spin_unlock_irqrestore(io_lock, flags); + continue; + } + + /* + * Found IO that is still pending with firmware and + * belongs to rport that went away + */ + if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) { + spin_unlock_irqrestore(io_lock, flags); + continue; + } + old_ioreq_state = CMD_STATE(sc); + CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING; + CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE; + + BUG_ON(io_req->abts_done); + + FNIC_SCSI_DBG(KERN_DEBUG, + fnic->lport->host, + "fnic_terminate_rport_io: Issuing abts\n"); + + spin_unlock_irqrestore(io_lock, flags); + + /* Now queue the abort command to firmware */ + int_to_scsilun(sc->device->lun, &fc_lun); + + if (fnic_queue_abort_io_req(fnic, tag, + FCPIO_ITMF_ABT_TASK_TERM, + fc_lun.scsi_lun, io_req)) { + /* + * Revert the cmd state back to old state, if + * it hasnt changed in between. This cmd will get + * aborted later by scsi_eh, or cleaned up during + * lun reset + */ + io_lock = fnic_io_lock_hash(fnic, sc); + + spin_lock_irqsave(io_lock, flags); + if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) + CMD_STATE(sc) = old_ioreq_state; + spin_unlock_irqrestore(io_lock, flags); + } + } + +} + +static void fnic_block_error_handler(struct scsi_cmnd *sc) +{ + struct Scsi_Host *shost = sc->device->host; + struct fc_rport *rport = starget_to_rport(scsi_target(sc->device)); + unsigned long flags; + + spin_lock_irqsave(shost->host_lock, flags); + while (rport->port_state == FC_PORTSTATE_BLOCKED) { + spin_unlock_irqrestore(shost->host_lock, flags); + msleep(1000); + spin_lock_irqsave(shost->host_lock, flags); + } + spin_unlock_irqrestore(shost->host_lock, flags); + +} + +/* + * This function is exported to SCSI for sending abort cmnds. + * A SCSI IO is represented by a io_req in the driver. + * The ioreq is linked to the SCSI Cmd, thus a link with the ULP's IO. 
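A minimal user-space model of the rollback pattern both exchange-reset routines above use when queuing the abort fails: the command state is optimistically set to ABTS_PENDING before the lock is dropped, and restored only if nothing else has changed it in the meantime (the driver does that re-check under io_lock):

/* Standalone model of optimistic state change with rollback; not driver code. */
#include <stdio.h>

enum ioreq_state { CMD_PENDING, ABTS_PENDING, ABTS_COMPLETE, CMD_COMPLETE };

struct cmd { enum ioreq_state state; };

static void try_abort(struct cmd *c, int queue_ok)
{
	enum ioreq_state old = c->state;	/* driver: read under io_lock */

	c->state = ABTS_PENDING;
	/* driver drops io_lock here and queues the abort descriptor to firmware */
	if (!queue_ok) {
		/* driver re-takes io_lock; only undo if the state is still ours */
		if (c->state == ABTS_PENDING)
			c->state = old;
	}
}

int main(void)
{
	struct cmd c = { CMD_PENDING };

	try_abort(&c, 0);	/* queueing failed: state rolled back */
	printf("after failed queue: %d (CMD_PENDING=%d)\n", c.state, CMD_PENDING);
	try_abort(&c, 1);	/* queueing succeeded: stays ABTS_PENDING */
	printf("after queued abort: %d (ABTS_PENDING=%d)\n", c.state, ABTS_PENDING);
	return 0;
}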
+ */ +int fnic_abort_cmd(struct scsi_cmnd *sc) +{ + struct fc_lport *lp; + struct fnic *fnic; + struct fnic_io_req *io_req; + struct fc_rport *rport; + spinlock_t *io_lock; + unsigned long flags; + int ret = SUCCESS; + u32 task_req; + struct scsi_lun fc_lun; + DECLARE_COMPLETION_ONSTACK(tm_done); + + /* Wait for rport to unblock */ + fnic_block_error_handler(sc); + + /* Get local-port, check ready and link up */ + lp = shost_priv(sc->device->host); + + fnic = lport_priv(lp); + FNIC_SCSI_DBG(KERN_DEBUG, + fnic->lport->host, + "Abort Cmd called FCID 0x%x, LUN 0x%x TAG %d\n", + (starget_to_rport(scsi_target(sc->device)))->port_id, + sc->device->lun, sc->request->tag); + + if (lp->state != LPORT_ST_READY || !(lp->link_up)) { + ret = FAILED; + goto fnic_abort_cmd_end; + } + + /* + * Avoid a race between SCSI issuing the abort and the device + * completing the command. + * + * If the command is already completed by the fw cmpl code, + * we just return SUCCESS from here. This means that the abort + * succeeded. In the SCSI ML, since the timeout for command has + * happened, the completion wont actually complete the command + * and it will be considered as an aborted command + * + * The CMD_SP will not be cleared except while holding io_req_lock. + */ + io_lock = fnic_io_lock_hash(fnic, sc); + spin_lock_irqsave(io_lock, flags); + io_req = (struct fnic_io_req *)CMD_SP(sc); + if (!io_req) { + spin_unlock_irqrestore(io_lock, flags); + goto fnic_abort_cmd_end; + } + + io_req->abts_done = &tm_done; + + if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) { + spin_unlock_irqrestore(io_lock, flags); + goto wait_pending; + } + /* + * Command is still pending, need to abort it + * If the firmware completes the command after this point, + * the completion wont be done till mid-layer, since abort + * has already started. + */ + CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING; + CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE; + + spin_unlock_irqrestore(io_lock, flags); + + /* + * Check readiness of the remote port. If the path to remote + * port is up, then send abts to the remote port to terminate + * the IO. Else, just locally terminate the IO in the firmware + */ + rport = starget_to_rport(scsi_target(sc->device)); + if (fc_remote_port_chkready(rport) == 0) + task_req = FCPIO_ITMF_ABT_TASK; + else + task_req = FCPIO_ITMF_ABT_TASK_TERM; + + /* Now queue the abort command to firmware */ + int_to_scsilun(sc->device->lun, &fc_lun); + + if (fnic_queue_abort_io_req(fnic, sc->request->tag, task_req, + fc_lun.scsi_lun, io_req)) { + spin_lock_irqsave(io_lock, flags); + io_req = (struct fnic_io_req *)CMD_SP(sc); + if (io_req) + io_req->abts_done = NULL; + spin_unlock_irqrestore(io_lock, flags); + ret = FAILED; + goto fnic_abort_cmd_end; + } + + /* + * We queued an abort IO, wait for its completion. + * Once the firmware completes the abort command, it will + * wake up this thread. 
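A compile-only kernel-module sketch (not driver code) of the wait pattern described above: an on-stack completion is published for the completion path to signal, and the waiter uses a timeout so a silent firmware cannot hang the error handler. The work item stands in for the firmware completion path, and the global pointer stands in for io_req->abts_done, which the real driver protects with io_lock:

#include <linux/module.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/jiffies.h>

static struct completion *abts_done;	/* stands in for io_req->abts_done */

static void fake_fw_work(struct work_struct *work)
{
	msleep(100);			/* pretend the firmware is processing the abort */
	if (abts_done)
		complete(abts_done);	/* wake the waiting error-handler thread */
}
static DECLARE_WORK(fw_work, fake_fw_work);

static int __init sketch_init(void)
{
	DECLARE_COMPLETION_ONSTACK(tm_done);

	abts_done = &tm_done;		/* publish before queuing, as the driver does */
	schedule_work(&fw_work);

	/* wait_for_completion_timeout() returns 0 on timeout */
	if (!wait_for_completion_timeout(&tm_done, msecs_to_jiffies(2000)))
		pr_info("abort wait timed out\n");
	else
		pr_info("abort completion signalled\n");

	/* make sure nobody can still reference the on-stack completion */
	flush_work(&fw_work);
	abts_done = NULL;
	return 0;
}

static void __exit sketch_exit(void)
{
}

module_init(sketch_init);
module_exit(sketch_exit);
MODULE_LICENSE("GPL");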
+ */ + wait_pending: + wait_for_completion_timeout(&tm_done, + msecs_to_jiffies + (2 * fnic->config.ra_tov + + fnic->config.ed_tov)); + + /* Check the abort status */ + spin_lock_irqsave(io_lock, flags); + + io_req = (struct fnic_io_req *)CMD_SP(sc); + if (!io_req) { + spin_unlock_irqrestore(io_lock, flags); + ret = FAILED; + goto fnic_abort_cmd_end; + } + io_req->abts_done = NULL; + + /* fw did not complete abort, timed out */ + if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) { + spin_unlock_irqrestore(io_lock, flags); + ret = FAILED; + goto fnic_abort_cmd_end; + } + + /* + * firmware completed the abort, check the status, + * free the io_req irrespective of failure or success + */ + if (CMD_ABTS_STATUS(sc) != FCPIO_SUCCESS) + ret = FAILED; + + CMD_SP(sc) = NULL; + + spin_unlock_irqrestore(io_lock, flags); + + fnic_release_ioreq_buf(fnic, io_req, sc); + mempool_free(io_req, fnic->io_req_pool); + +fnic_abort_cmd_end: + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + "Returning from abort cmd %s\n", + (ret == SUCCESS) ? + "SUCCESS" : "FAILED"); + return ret; +} + +static inline int fnic_queue_dr_io_req(struct fnic *fnic, + struct scsi_cmnd *sc, + struct fnic_io_req *io_req) +{ + struct vnic_wq_copy *wq = &fnic->wq_copy[0]; + struct scsi_lun fc_lun; + int ret = 0; + unsigned long intr_flags; + + spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags); + + if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0]) + free_wq_copy_descs(fnic, wq); + + if (!vnic_wq_copy_desc_avail(wq)) { + ret = -EAGAIN; + goto lr_io_req_end; + } + + /* fill in the lun info */ + int_to_scsilun(sc->device->lun, &fc_lun); + + fnic_queue_wq_copy_desc_itmf(wq, sc->request->tag | FNIC_TAG_DEV_RST, + 0, FCPIO_ITMF_LUN_RESET, SCSI_NO_TAG, + fc_lun.scsi_lun, io_req->port_id, + fnic->config.ra_tov, fnic->config.ed_tov); + +lr_io_req_end: + spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags); + + return ret; +} + +/* + * Clean up any pending aborts on the lun + * For each outstanding IO on this lun, whose abort is not completed by fw, + * issue a local abort. Wait for abort to complete. 
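A compact user-space summary, under the same assumptions as the code above, of how fnic_abort_cmd() classifies the outcome once the wait returns: a missing io_req or a state still stuck at ABTS_PENDING both count as failure, and otherwise only an FCPIO_SUCCESS abort status counts as success:

/* Standalone decision-table model; not driver code. */
#include <stdio.h>

static const char *abort_result(int have_io_req, int still_abts_pending,
				int abts_status_ok)
{
	if (!have_io_req)
		return "FAILED (io_req already gone)";
	if (still_abts_pending)
		return "FAILED (firmware never completed the abort)";
	return abts_status_ok ? "SUCCESS" : "FAILED (abort status not FCPIO_SUCCESS)";
}

int main(void)
{
	printf("%s\n", abort_result(1, 0, 1));	/* clean abort */
	printf("%s\n", abort_result(1, 1, 0));	/* abort timed out in firmware */
	printf("%s\n", abort_result(0, 0, 0));	/* command vanished under us */
	return 0;
}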
Return 0 if all commands + * successfully aborted, 1 otherwise + */ +static int fnic_clean_pending_aborts(struct fnic *fnic, + struct scsi_cmnd *lr_sc) +{ + int tag; + struct fnic_io_req *io_req; + spinlock_t *io_lock; + unsigned long flags; + int ret = 0; + struct scsi_cmnd *sc; + struct fc_rport *rport; + struct scsi_lun fc_lun; + struct scsi_device *lun_dev = lr_sc->device; + DECLARE_COMPLETION_ONSTACK(tm_done); + + for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) { + sc = scsi_host_find_tag(fnic->lport->host, tag); + /* + * ignore this lun reset cmd or cmds that do not belong to + * this lun + */ + if (!sc || sc == lr_sc || sc->device != lun_dev) + continue; + + io_lock = fnic_io_lock_hash(fnic, sc); + spin_lock_irqsave(io_lock, flags); + + io_req = (struct fnic_io_req *)CMD_SP(sc); + + if (!io_req || sc->device != lun_dev) { + spin_unlock_irqrestore(io_lock, flags); + continue; + } + + /* + * Found IO that is still pending with firmware and + * belongs to the LUN that we are resetting + */ + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + "Found IO in %s on lun\n", + fnic_ioreq_state_to_str(CMD_STATE(sc))); + + BUG_ON(CMD_STATE(sc) != FNIC_IOREQ_ABTS_PENDING); + + CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE; + io_req->abts_done = &tm_done; + spin_unlock_irqrestore(io_lock, flags); + + /* Now queue the abort command to firmware */ + int_to_scsilun(sc->device->lun, &fc_lun); + rport = starget_to_rport(scsi_target(sc->device)); + + if (fnic_queue_abort_io_req(fnic, tag, + FCPIO_ITMF_ABT_TASK_TERM, + fc_lun.scsi_lun, io_req)) { + spin_lock_irqsave(io_lock, flags); + io_req = (struct fnic_io_req *)CMD_SP(sc); + if (io_req) + io_req->abts_done = NULL; + spin_unlock_irqrestore(io_lock, flags); + ret = 1; + goto clean_pending_aborts_end; + } + + wait_for_completion_timeout(&tm_done, + msecs_to_jiffies + (fnic->config.ed_tov)); + + /* Recheck cmd state to check if it is now aborted */ + spin_lock_irqsave(io_lock, flags); + io_req = (struct fnic_io_req *)CMD_SP(sc); + if (!io_req) { + spin_unlock_irqrestore(io_lock, flags); + ret = 1; + goto clean_pending_aborts_end; + } + + io_req->abts_done = NULL; + + /* if abort is still pending with fw, fail */ + if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) { + spin_unlock_irqrestore(io_lock, flags); + ret = 1; + goto clean_pending_aborts_end; + } + CMD_SP(sc) = NULL; + spin_unlock_irqrestore(io_lock, flags); + + fnic_release_ioreq_buf(fnic, io_req, sc); + mempool_free(io_req, fnic->io_req_pool); + } + +clean_pending_aborts_end: + return ret; +} + +/* + * SCSI Eh thread issues a Lun Reset when one or more commands on a LUN + * fail to get aborted. It calls driver's eh_device_reset with a SCSI command + * on the LUN. 
+ */ +int fnic_device_reset(struct scsi_cmnd *sc) +{ + struct fc_lport *lp; + struct fnic *fnic; + struct fnic_io_req *io_req; + struct fc_rport *rport; + int status; + int ret = FAILED; + spinlock_t *io_lock; + unsigned long flags; + DECLARE_COMPLETION_ONSTACK(tm_done); + + /* Wait for rport to unblock */ + fnic_block_error_handler(sc); + + /* Get local-port, check ready and link up */ + lp = shost_priv(sc->device->host); + + fnic = lport_priv(lp); + FNIC_SCSI_DBG(KERN_DEBUG, + fnic->lport->host, + "Device reset called FCID 0x%x, LUN 0x%x\n", + (starget_to_rport(scsi_target(sc->device)))->port_id, + sc->device->lun); + + + if (lp->state != LPORT_ST_READY || !(lp->link_up)) + goto fnic_device_reset_end; + + /* Check if remote port up */ + rport = starget_to_rport(scsi_target(sc->device)); + if (fc_remote_port_chkready(rport)) + goto fnic_device_reset_end; + + io_lock = fnic_io_lock_hash(fnic, sc); + spin_lock_irqsave(io_lock, flags); + io_req = (struct fnic_io_req *)CMD_SP(sc); + + /* + * If there is a io_req attached to this command, then use it, + * else allocate a new one. + */ + if (!io_req) { + io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC); + if (!io_req) { + spin_unlock_irqrestore(io_lock, flags); + goto fnic_device_reset_end; + } + memset(io_req, 0, sizeof(*io_req)); + io_req->port_id = rport->port_id; + CMD_SP(sc) = (char *)io_req; + } + io_req->dr_done = &tm_done; + CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING; + CMD_LR_STATUS(sc) = FCPIO_INVALID_CODE; + spin_unlock_irqrestore(io_lock, flags); + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "TAG %d\n", + sc->request->tag); + + /* + * issue the device reset, if enqueue failed, clean up the ioreq + * and break assoc with scsi cmd + */ + if (fnic_queue_dr_io_req(fnic, sc, io_req)) { + spin_lock_irqsave(io_lock, flags); + io_req = (struct fnic_io_req *)CMD_SP(sc); + if (io_req) + io_req->dr_done = NULL; + goto fnic_device_reset_clean; + } + + /* + * Wait on the local completion for LUN reset. The io_req may be + * freed while we wait since we hold no lock. + */ + wait_for_completion_timeout(&tm_done, + msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT)); + + spin_lock_irqsave(io_lock, flags); + io_req = (struct fnic_io_req *)CMD_SP(sc); + if (!io_req) { + spin_unlock_irqrestore(io_lock, flags); + goto fnic_device_reset_end; + } + io_req->dr_done = NULL; + + status = CMD_LR_STATUS(sc); + spin_unlock_irqrestore(io_lock, flags); + + /* + * If lun reset not completed, bail out with failed. io_req + * gets cleaned up during higher levels of EH + */ + if (status == FCPIO_INVALID_CODE) { + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + "Device reset timed out\n"); + goto fnic_device_reset_end; + } + + /* Completed, but not successful, clean up the io_req, return fail */ + if (status != FCPIO_SUCCESS) { + spin_lock_irqsave(io_lock, flags); + FNIC_SCSI_DBG(KERN_DEBUG, + fnic->lport->host, + "Device reset completed - failed\n"); + io_req = (struct fnic_io_req *)CMD_SP(sc); + goto fnic_device_reset_clean; + } + + /* + * Clean up any aborts on this lun that have still not + * completed. If any of these fail, then LUN reset fails. + * clean_pending_aborts cleans all cmds on this lun except + * the lun reset cmd. 
If all cmds get cleaned, the lun reset + * succeeds + */ + if (fnic_clean_pending_aborts(fnic, sc)) { + spin_lock_irqsave(io_lock, flags); + io_req = (struct fnic_io_req *)CMD_SP(sc); + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + "Device reset failed" + " since could not abort all IOs\n"); + goto fnic_device_reset_clean; + } + + /* Clean lun reset command */ + spin_lock_irqsave(io_lock, flags); + io_req = (struct fnic_io_req *)CMD_SP(sc); + if (io_req) + /* Completed, and successful */ + ret = SUCCESS; + +fnic_device_reset_clean: + if (io_req) + CMD_SP(sc) = NULL; + + spin_unlock_irqrestore(io_lock, flags); + + if (io_req) { + fnic_release_ioreq_buf(fnic, io_req, sc); + mempool_free(io_req, fnic->io_req_pool); + } + +fnic_device_reset_end: + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + "Returning from device reset %s\n", + (ret == SUCCESS) ? + "SUCCESS" : "FAILED"); + return ret; +} + +/* Clean up all IOs, clean up libFC local port */ +int fnic_reset(struct Scsi_Host *shost) +{ + struct fc_lport *lp; + struct fnic *fnic; + int ret = SUCCESS; + + lp = shost_priv(shost); + fnic = lport_priv(lp); + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + "fnic_reset called\n"); + + /* + * Reset local port, this will clean up libFC exchanges, + * reset remote port sessions, and if link is up, begin flogi + */ + if (lp->tt.lport_reset(lp)) + ret = FAILED; + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + "Returning from fnic reset %s\n", + (ret == SUCCESS) ? + "SUCCESS" : "FAILED"); + + return ret; +} + +/* + * SCSI Error handling calls driver's eh_host_reset if all prior + * error handling levels return FAILED. If host reset completes + * successfully, and if link is up, then Fabric login begins. + * + * Host Reset is the highest level of error recovery. If this fails, then + * host is offlined by SCSI. 
+ * + */ +int fnic_host_reset(struct scsi_cmnd *sc) +{ + int ret; + unsigned long wait_host_tmo; + struct Scsi_Host *shost = sc->device->host; + struct fc_lport *lp = shost_priv(shost); + + /* + * If fnic_reset is successful, wait for fabric login to complete + * scsi-ml tries to send a TUR to every device if host reset is + * successful, so before returning to scsi, fabric should be up + */ + ret = fnic_reset(shost); + if (ret == SUCCESS) { + wait_host_tmo = jiffies + FNIC_HOST_RESET_SETTLE_TIME * HZ; + ret = FAILED; + while (time_before(jiffies, wait_host_tmo)) { + if ((lp->state == LPORT_ST_READY) && + (lp->link_up)) { + ret = SUCCESS; + break; + } + ssleep(1); + } + } + + return ret; +} + +/* + * This fxn is called from libFC when host is removed + */ +void fnic_scsi_abort_io(struct fc_lport *lp) +{ + int err = 0; + unsigned long flags; + enum fnic_state old_state; + struct fnic *fnic = lport_priv(lp); + DECLARE_COMPLETION_ONSTACK(remove_wait); + + /* Issue firmware reset for fnic, wait for reset to complete */ + spin_lock_irqsave(&fnic->fnic_lock, flags); + fnic->remove_wait = &remove_wait; + old_state = fnic->state; + fnic->state = FNIC_IN_FC_TRANS_ETH_MODE; + vnic_dev_del_addr(fnic->vdev, fnic->data_src_addr); + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + err = fnic_fw_reset_handler(fnic); + if (err) { + spin_lock_irqsave(&fnic->fnic_lock, flags); + if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) + fnic->state = old_state; + fnic->remove_wait = NULL; + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + return; + } + + /* Wait for firmware reset to complete */ + wait_for_completion_timeout(&remove_wait, + msecs_to_jiffies(FNIC_RMDEVICE_TIMEOUT)); + + spin_lock_irqsave(&fnic->fnic_lock, flags); + fnic->remove_wait = NULL; + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + "fnic_scsi_abort_io %s\n", + (fnic->state == FNIC_IN_ETH_MODE) ? + "SUCCESS" : "FAILED"); + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + +} + +/* + * This fxn called from libFC to clean up driver IO state on link down + */ +void fnic_scsi_cleanup(struct fc_lport *lp) +{ + unsigned long flags; + enum fnic_state old_state; + struct fnic *fnic = lport_priv(lp); + + /* issue fw reset */ + spin_lock_irqsave(&fnic->fnic_lock, flags); + old_state = fnic->state; + fnic->state = FNIC_IN_FC_TRANS_ETH_MODE; + vnic_dev_del_addr(fnic->vdev, fnic->data_src_addr); + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + if (fnic_fw_reset_handler(fnic)) { + spin_lock_irqsave(&fnic->fnic_lock, flags); + if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) + fnic->state = old_state; + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + } + +} + +void fnic_empty_scsi_cleanup(struct fc_lport *lp) +{ +} + +void fnic_exch_mgr_reset(struct fc_lport *lp, u32 sid, u32 did) +{ + struct fnic *fnic = lport_priv(lp); + + /* Non-zero sid, nothing to do */ + if (sid) + goto call_fc_exch_mgr_reset; + + if (did) { + fnic_rport_exch_reset(fnic, did); + goto call_fc_exch_mgr_reset; + } + + /* + * sid = 0, did = 0 + * link down or device being removed + */ + if (!fnic->in_remove) + fnic_scsi_cleanup(lp); + else + fnic_scsi_abort_io(lp); + + /* call libFC exch mgr reset to reset its exchanges */ +call_fc_exch_mgr_reset: + fc_exch_mgr_reset(lp, sid, did); + +} diff --git a/drivers/scsi/fnic/rq_enet_desc.h b/drivers/scsi/fnic/rq_enet_desc.h new file mode 100644 index 00000000000..92e80ae6b72 --- /dev/null +++ b/drivers/scsi/fnic/rq_enet_desc.h @@ -0,0 +1,58 @@ +/* + * Copyright 2008 Cisco Systems, Inc. All rights reserved. 
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef _RQ_ENET_DESC_H_ +#define _RQ_ENET_DESC_H_ + +/* Ethernet receive queue descriptor: 16B */ +struct rq_enet_desc { + __le64 address; + __le16 length_type; + u8 reserved[6]; +}; + +enum rq_enet_type_types { + RQ_ENET_TYPE_ONLY_SOP = 0, + RQ_ENET_TYPE_NOT_SOP = 1, + RQ_ENET_TYPE_RESV2 = 2, + RQ_ENET_TYPE_RESV3 = 3, +}; + +#define RQ_ENET_ADDR_BITS 64 +#define RQ_ENET_LEN_BITS 14 +#define RQ_ENET_LEN_MASK ((1 << RQ_ENET_LEN_BITS) - 1) +#define RQ_ENET_TYPE_BITS 2 +#define RQ_ENET_TYPE_MASK ((1 << RQ_ENET_TYPE_BITS) - 1) + +static inline void rq_enet_desc_enc(struct rq_enet_desc *desc, + u64 address, u8 type, u16 length) +{ + desc->address = cpu_to_le64(address); + desc->length_type = cpu_to_le16((length & RQ_ENET_LEN_MASK) | + ((type & RQ_ENET_TYPE_MASK) << RQ_ENET_LEN_BITS)); +} + +static inline void rq_enet_desc_dec(struct rq_enet_desc *desc, + u64 *address, u8 *type, u16 *length) +{ + *address = le64_to_cpu(desc->address); + *length = le16_to_cpu(desc->length_type) & RQ_ENET_LEN_MASK; + *type = (u8)((le16_to_cpu(desc->length_type) >> RQ_ENET_LEN_BITS) & + RQ_ENET_TYPE_MASK); +} + +#endif /* _RQ_ENET_DESC_H_ */ diff --git a/drivers/scsi/fnic/vnic_cq.c b/drivers/scsi/fnic/vnic_cq.c new file mode 100644 index 00000000000..c5db32eda5e --- /dev/null +++ b/drivers/scsi/fnic/vnic_cq.c @@ -0,0 +1,85 @@ +/* + * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
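A minimal user-space model of the 16-bit length/type packing implemented by rq_enet_desc_enc() and rq_enet_desc_dec() above, with the little-endian conversion left out: the low 14 bits carry the buffer length and the next two bits the descriptor type:

/* Standalone pack/unpack round trip; not driver code. */
#include <stdio.h>
#include <stdint.h>

#define LEN_BITS  14
#define LEN_MASK  ((1u << LEN_BITS) - 1)
#define TYPE_MASK 0x3u

static uint16_t pack(uint16_t length, uint8_t type)
{
	return (length & LEN_MASK) | ((type & TYPE_MASK) << LEN_BITS);
}

static void unpack(uint16_t length_type, uint16_t *length, uint8_t *type)
{
	*length = length_type & LEN_MASK;
	*type = (length_type >> LEN_BITS) & TYPE_MASK;
}

int main(void)
{
	uint16_t lt = pack(2048, 0 /* RQ_ENET_TYPE_ONLY_SOP */), len;
	uint8_t type;

	unpack(lt, &len, &type);
	printf("length_type=%#x -> len=%u type=%u\n", lt, len, type);
	return 0;
}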
+ */ +#include <linux/errno.h> +#include <linux/types.h> +#include <linux/pci.h> +#include "vnic_dev.h" +#include "vnic_cq.h" + +void vnic_cq_free(struct vnic_cq *cq) +{ + vnic_dev_free_desc_ring(cq->vdev, &cq->ring); + + cq->ctrl = NULL; +} + +int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index, + unsigned int desc_count, unsigned int desc_size) +{ + int err; + + cq->index = index; + cq->vdev = vdev; + + cq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_CQ, index); + if (!cq->ctrl) { + printk(KERN_ERR "Failed to hook CQ[%d] resource\n", index); + return -EINVAL; + } + + err = vnic_dev_alloc_desc_ring(vdev, &cq->ring, desc_count, desc_size); + if (err) + return err; + + return 0; +} + +void vnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable, + unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail, + unsigned int cq_tail_color, unsigned int interrupt_enable, + unsigned int cq_entry_enable, unsigned int cq_message_enable, + unsigned int interrupt_offset, u64 cq_message_addr) +{ + u64 paddr; + + paddr = (u64)cq->ring.base_addr | VNIC_PADDR_TARGET; + writeq(paddr, &cq->ctrl->ring_base); + iowrite32(cq->ring.desc_count, &cq->ctrl->ring_size); + iowrite32(flow_control_enable, &cq->ctrl->flow_control_enable); + iowrite32(color_enable, &cq->ctrl->color_enable); + iowrite32(cq_head, &cq->ctrl->cq_head); + iowrite32(cq_tail, &cq->ctrl->cq_tail); + iowrite32(cq_tail_color, &cq->ctrl->cq_tail_color); + iowrite32(interrupt_enable, &cq->ctrl->interrupt_enable); + iowrite32(cq_entry_enable, &cq->ctrl->cq_entry_enable); + iowrite32(cq_message_enable, &cq->ctrl->cq_message_enable); + iowrite32(interrupt_offset, &cq->ctrl->interrupt_offset); + writeq(cq_message_addr, &cq->ctrl->cq_message_addr); +} + +void vnic_cq_clean(struct vnic_cq *cq) +{ + cq->to_clean = 0; + cq->last_color = 0; + + iowrite32(0, &cq->ctrl->cq_head); + iowrite32(0, &cq->ctrl->cq_tail); + iowrite32(1, &cq->ctrl->cq_tail_color); + + vnic_dev_clear_desc_ring(&cq->ring); +} diff --git a/drivers/scsi/fnic/vnic_cq.h b/drivers/scsi/fnic/vnic_cq.h new file mode 100644 index 00000000000..4ede6809fb1 --- /dev/null +++ b/drivers/scsi/fnic/vnic_cq.h @@ -0,0 +1,121 @@ +/* + * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE.
+ */ +#ifndef _VNIC_CQ_H_ +#define _VNIC_CQ_H_ + +#include "cq_desc.h" +#include "vnic_dev.h" + +/* + * These defines avoid symbol clash between fnic and enic (Cisco 10G Eth + * Driver) when both are built with CONFIG options =y + */ +#define vnic_cq_service fnic_cq_service +#define vnic_cq_free fnic_cq_free +#define vnic_cq_alloc fnic_cq_alloc +#define vnic_cq_init fnic_cq_init +#define vnic_cq_clean fnic_cq_clean + +/* Completion queue control */ +struct vnic_cq_ctrl { + u64 ring_base; /* 0x00 */ + u32 ring_size; /* 0x08 */ + u32 pad0; + u32 flow_control_enable; /* 0x10 */ + u32 pad1; + u32 color_enable; /* 0x18 */ + u32 pad2; + u32 cq_head; /* 0x20 */ + u32 pad3; + u32 cq_tail; /* 0x28 */ + u32 pad4; + u32 cq_tail_color; /* 0x30 */ + u32 pad5; + u32 interrupt_enable; /* 0x38 */ + u32 pad6; + u32 cq_entry_enable; /* 0x40 */ + u32 pad7; + u32 cq_message_enable; /* 0x48 */ + u32 pad8; + u32 interrupt_offset; /* 0x50 */ + u32 pad9; + u64 cq_message_addr; /* 0x58 */ + u32 pad10; +}; + +struct vnic_cq { + unsigned int index; + struct vnic_dev *vdev; + struct vnic_cq_ctrl __iomem *ctrl; /* memory-mapped */ + struct vnic_dev_ring ring; + unsigned int to_clean; + unsigned int last_color; +}; + +static inline unsigned int vnic_cq_service(struct vnic_cq *cq, + unsigned int work_to_do, + int (*q_service)(struct vnic_dev *vdev, struct cq_desc *cq_desc, + u8 type, u16 q_number, u16 completed_index, void *opaque), + void *opaque) +{ + struct cq_desc *cq_desc; + unsigned int work_done = 0; + u16 q_number, completed_index; + u8 type, color; + + cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs + + cq->ring.desc_size * cq->to_clean); + cq_desc_dec(cq_desc, &type, &color, + &q_number, &completed_index); + + while (color != cq->last_color) { + + if ((*q_service)(cq->vdev, cq_desc, type, + q_number, completed_index, opaque)) + break; + + cq->to_clean++; + if (cq->to_clean == cq->ring.desc_count) { + cq->to_clean = 0; + cq->last_color = cq->last_color ? 0 : 1; + } + + cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs + + cq->ring.desc_size * cq->to_clean); + cq_desc_dec(cq_desc, &type, &color, + &q_number, &completed_index); + + work_done++; + if (work_done >= work_to_do) + break; + } + + return work_done; +} + +void vnic_cq_free(struct vnic_cq *cq); +int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index, + unsigned int desc_count, unsigned int desc_size); +void vnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable, + unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail, + unsigned int cq_tail_color, unsigned int interrupt_enable, + unsigned int cq_entry_enable, unsigned int message_enable, + unsigned int interrupt_offset, u64 message_addr); +void vnic_cq_clean(struct vnic_cq *cq); + +#endif /* _VNIC_CQ_H_ */ diff --git a/drivers/scsi/fnic/vnic_cq_copy.h b/drivers/scsi/fnic/vnic_cq_copy.h new file mode 100644 index 00000000000..7901ce255a8 --- /dev/null +++ b/drivers/scsi/fnic/vnic_cq_copy.h @@ -0,0 +1,62 @@ +/* + * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef _VNIC_CQ_COPY_H_ +#define _VNIC_CQ_COPY_H_ + +#include "fcpio.h" + +static inline unsigned int vnic_cq_copy_service( + struct vnic_cq *cq, + int (*q_service)(struct vnic_dev *vdev, + unsigned int index, + struct fcpio_fw_req *desc), + unsigned int work_to_do) + +{ + struct fcpio_fw_req *desc; + unsigned int work_done = 0; + u8 color; + + desc = (struct fcpio_fw_req *)((u8 *)cq->ring.descs + + cq->ring.desc_size * cq->to_clean); + fcpio_color_dec(desc, &color); + + while (color != cq->last_color) { + + if ((*q_service)(cq->vdev, cq->index, desc)) + break; + + cq->to_clean++; + if (cq->to_clean == cq->ring.desc_count) { + cq->to_clean = 0; + cq->last_color = cq->last_color ? 0 : 1; + } + + desc = (struct fcpio_fw_req *)((u8 *)cq->ring.descs + + cq->ring.desc_size * cq->to_clean); + fcpio_color_dec(desc, &color); + + work_done++; + if (work_done >= work_to_do) + break; + } + + return work_done; +} + +#endif /* _VNIC_CQ_COPY_H_ */ diff --git a/drivers/scsi/fnic/vnic_dev.c b/drivers/scsi/fnic/vnic_dev.c new file mode 100644 index 00000000000..56677064508 --- /dev/null +++ b/drivers/scsi/fnic/vnic_dev.c @@ -0,0 +1,690 @@ +/* + * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include <linux/kernel.h> +#include <linux/errno.h> +#include <linux/types.h> +#include <linux/pci.h> +#include <linux/delay.h> +#include <linux/if_ether.h> +#include "vnic_resource.h" +#include "vnic_devcmd.h" +#include "vnic_dev.h" +#include "vnic_stats.h" + +struct vnic_res { + void __iomem *vaddr; + unsigned int count; +}; + +struct vnic_dev { + void *priv; + struct pci_dev *pdev; + struct vnic_res res[RES_TYPE_MAX]; + enum vnic_dev_intr_mode intr_mode; + struct vnic_devcmd __iomem *devcmd; + struct vnic_devcmd_notify *notify; + struct vnic_devcmd_notify notify_copy; + dma_addr_t notify_pa; + u32 *linkstatus; + dma_addr_t linkstatus_pa; + struct vnic_stats *stats; + dma_addr_t stats_pa; + struct vnic_devcmd_fw_info *fw_info; + dma_addr_t fw_info_pa; +}; + +#define VNIC_MAX_RES_HDR_SIZE \ + (sizeof(struct vnic_resource_header) + \ + sizeof(struct vnic_resource) * RES_TYPE_MAX) +#define VNIC_RES_STRIDE 128 + +void *vnic_dev_priv(struct vnic_dev *vdev) +{ + return vdev->priv; +} + +static int vnic_dev_discover_res(struct vnic_dev *vdev, + struct vnic_dev_bar *bar) +{ + struct vnic_resource_header __iomem *rh; + struct vnic_resource __iomem *r; + u8 type; + + if (bar->len < VNIC_MAX_RES_HDR_SIZE) { + printk(KERN_ERR "vNIC BAR0 res hdr length error\n"); + return -EINVAL; + } + + rh = bar->vaddr; + if (!rh) { + printk(KERN_ERR "vNIC BAR0 res hdr not mem-mapped\n"); + return -EINVAL; + } + + if (ioread32(&rh->magic) != VNIC_RES_MAGIC || + ioread32(&rh->version) != VNIC_RES_VERSION) { + printk(KERN_ERR "vNIC BAR0 res magic/version error " + "exp (%lx/%lx) curr (%x/%x)\n", + VNIC_RES_MAGIC, VNIC_RES_VERSION, + ioread32(&rh->magic), ioread32(&rh->version)); + return -EINVAL; + } + + r = (struct vnic_resource __iomem *)(rh + 1); + + while ((type = ioread8(&r->type)) != RES_TYPE_EOL) { + + u8 bar_num = ioread8(&r->bar); + u32 bar_offset = ioread32(&r->bar_offset); + u32 count = ioread32(&r->count); + u32 len; + + r++; + + if (bar_num != 0) /* only mapping in BAR0 resources */ + continue; + + switch (type) { + case RES_TYPE_WQ: + case RES_TYPE_RQ: + case RES_TYPE_CQ: + case RES_TYPE_INTR_CTRL: + /* each count is stride bytes long */ + len = count * VNIC_RES_STRIDE; + if (len + bar_offset > bar->len) { + printk(KERN_ERR "vNIC BAR0 resource %d " + "out-of-bounds, offset 0x%x + " + "size 0x%x > bar len 0x%lx\n", + type, bar_offset, + len, + bar->len); + return -EINVAL; + } + break; + case RES_TYPE_INTR_PBA_LEGACY: + case RES_TYPE_DEVCMD: + len = count; + break; + default: + continue; + } + + vdev->res[type].count = count; + vdev->res[type].vaddr = (char __iomem *)bar->vaddr + bar_offset; + } + + return 0; +} + +unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev, + enum vnic_res_type type) +{ + return vdev->res[type].count; +} + +void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type, + unsigned int index) +{ + if (!vdev->res[type].vaddr) + return NULL; + + switch (type) { + case RES_TYPE_WQ: + case RES_TYPE_RQ: + case RES_TYPE_CQ: + case RES_TYPE_INTR_CTRL: + return (char __iomem *)vdev->res[type].vaddr + + index * VNIC_RES_STRIDE; + default: + return (char __iomem *)vdev->res[type].vaddr; + } +} + +unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring, + unsigned int desc_count, + unsigned int desc_size) +{ + /* The base address of the desc rings must be 512 byte aligned. + * Descriptor count is aligned to groups of 32 descriptors. A + * count of 0 means the maximum 4096 descriptors. Descriptor + * size is aligned to 16 bytes.
+ */ + + unsigned int count_align = 32; + unsigned int desc_align = 16; + + ring->base_align = 512; + + if (desc_count == 0) + desc_count = 4096; + + ring->desc_count = ALIGN(desc_count, count_align); + + ring->desc_size = ALIGN(desc_size, desc_align); + + ring->size = ring->desc_count * ring->desc_size; + ring->size_unaligned = ring->size + ring->base_align; + + return ring->size_unaligned; +} + +void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring) +{ + memset(ring->descs, 0, ring->size); +} + +int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring, + unsigned int desc_count, unsigned int desc_size) +{ + vnic_dev_desc_ring_size(ring, desc_count, desc_size); + + ring->descs_unaligned = pci_alloc_consistent(vdev->pdev, + ring->size_unaligned, + &ring->base_addr_unaligned); + + if (!ring->descs_unaligned) { + printk(KERN_ERR + "Failed to allocate ring (size=%d), aborting\n", + (int)ring->size); + return -ENOMEM; + } + + ring->base_addr = ALIGN(ring->base_addr_unaligned, + ring->base_align); + ring->descs = (u8 *)ring->descs_unaligned + + (ring->base_addr - ring->base_addr_unaligned); + + vnic_dev_clear_desc_ring(ring); + + ring->desc_avail = ring->desc_count - 1; + + return 0; +} + +void vnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring) +{ + if (ring->descs) { + pci_free_consistent(vdev->pdev, + ring->size_unaligned, + ring->descs_unaligned, + ring->base_addr_unaligned); + ring->descs = NULL; + } +} + +int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, + u64 *a0, u64 *a1, int wait) +{ + struct vnic_devcmd __iomem *devcmd = vdev->devcmd; + int delay; + u32 status; + int dev_cmd_err[] = { + /* convert from fw's version of error.h to host's version */ + 0, /* ERR_SUCCESS */ + EINVAL, /* ERR_EINVAL */ + EFAULT, /* ERR_EFAULT */ + EPERM, /* ERR_EPERM */ + EBUSY, /* ERR_EBUSY */ + }; + int err; + + status = ioread32(&devcmd->status); + if (status & STAT_BUSY) { + printk(KERN_ERR "Busy devcmd %d\n", _CMD_N(cmd)); + return -EBUSY; + } + + if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) { + writeq(*a0, &devcmd->args[0]); + writeq(*a1, &devcmd->args[1]); + wmb(); + } + + iowrite32(cmd, &devcmd->cmd); + + if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT)) + return 0; + + for (delay = 0; delay < wait; delay++) { + + udelay(100); + + status = ioread32(&devcmd->status); + if (!(status & STAT_BUSY)) { + + if (status & STAT_ERROR) { + err = dev_cmd_err[(int)readq(&devcmd->args[0])]; + printk(KERN_ERR "Error %d devcmd %d\n", + err, _CMD_N(cmd)); + return -err; + } + + if (_CMD_DIR(cmd) & _CMD_DIR_READ) { + rmb(); + *a0 = readq(&devcmd->args[0]); + *a1 = readq(&devcmd->args[1]); + } + + return 0; + } + } + + printk(KERN_ERR "Timedout devcmd %d\n", _CMD_N(cmd)); + return -ETIMEDOUT; +} + +int vnic_dev_fw_info(struct vnic_dev *vdev, + struct vnic_devcmd_fw_info **fw_info) +{ + u64 a0, a1 = 0; + int wait = 1000; + int err = 0; + + if (!vdev->fw_info) { + vdev->fw_info = pci_alloc_consistent(vdev->pdev, + sizeof(struct vnic_devcmd_fw_info), + &vdev->fw_info_pa); + if (!vdev->fw_info) + return -ENOMEM; + + a0 = vdev->fw_info_pa; + + /* only get fw_info once and cache it */ + err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO, &a0, &a1, wait); + } + + *fw_info = vdev->fw_info; + + return err; +} + +int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size, + void *value) +{ + u64 a0, a1; + int wait = 1000; + int err; + + a0 = offset; + a1 = size; + + err = vnic_dev_cmd(vdev, CMD_DEV_SPEC, &a0, &a1, wait); + + switch (size) { + case 1: + *(u8 *)value = 
(u8)a0; + break; + case 2: + *(u16 *)value = (u16)a0; + break; + case 4: + *(u32 *)value = (u32)a0; + break; + case 8: + *(u64 *)value = a0; + break; + default: + BUG(); + break; + } + + return err; +} + +int vnic_dev_stats_clear(struct vnic_dev *vdev) +{ + u64 a0 = 0, a1 = 0; + int wait = 1000; + return vnic_dev_cmd(vdev, CMD_STATS_CLEAR, &a0, &a1, wait); +} + +int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats) +{ + u64 a0, a1; + int wait = 1000; + + if (!vdev->stats) { + vdev->stats = pci_alloc_consistent(vdev->pdev, + sizeof(struct vnic_stats), &vdev->stats_pa); + if (!vdev->stats) + return -ENOMEM; + } + + *stats = vdev->stats; + a0 = vdev->stats_pa; + a1 = sizeof(struct vnic_stats); + + return vnic_dev_cmd(vdev, CMD_STATS_DUMP, &a0, &a1, wait); +} + +int vnic_dev_close(struct vnic_dev *vdev) +{ + u64 a0 = 0, a1 = 0; + int wait = 1000; + return vnic_dev_cmd(vdev, CMD_CLOSE, &a0, &a1, wait); +} + +int vnic_dev_enable(struct vnic_dev *vdev) +{ + u64 a0 = 0, a1 = 0; + int wait = 1000; + return vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait); +} + +int vnic_dev_disable(struct vnic_dev *vdev) +{ + u64 a0 = 0, a1 = 0; + int wait = 1000; + return vnic_dev_cmd(vdev, CMD_DISABLE, &a0, &a1, wait); +} + +int vnic_dev_open(struct vnic_dev *vdev, int arg) +{ + u64 a0 = (u32)arg, a1 = 0; + int wait = 1000; + return vnic_dev_cmd(vdev, CMD_OPEN, &a0, &a1, wait); +} + +int vnic_dev_open_done(struct vnic_dev *vdev, int *done) +{ + u64 a0 = 0, a1 = 0; + int wait = 1000; + int err; + + *done = 0; + + err = vnic_dev_cmd(vdev, CMD_OPEN_STATUS, &a0, &a1, wait); + if (err) + return err; + + *done = (a0 == 0); + + return 0; +} + +int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg) +{ + u64 a0 = (u32)arg, a1 = 0; + int wait = 1000; + return vnic_dev_cmd(vdev, CMD_SOFT_RESET, &a0, &a1, wait); +} + +int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done) +{ + u64 a0 = 0, a1 = 0; + int wait = 1000; + int err; + + *done = 0; + + err = vnic_dev_cmd(vdev, CMD_SOFT_RESET_STATUS, &a0, &a1, wait); + if (err) + return err; + + *done = (a0 == 0); + + return 0; +} + +int vnic_dev_hang_notify(struct vnic_dev *vdev) +{ + u64 a0, a1; + int wait = 1000; + return vnic_dev_cmd(vdev, CMD_HANG_NOTIFY, &a0, &a1, wait); +} + +int vnic_dev_mac_addr(struct vnic_dev *vdev, u8 *mac_addr) +{ + u64 a0, a1; + int wait = 1000; + int err, i; + + for (i = 0; i < ETH_ALEN; i++) + mac_addr[i] = 0; + + err = vnic_dev_cmd(vdev, CMD_MAC_ADDR, &a0, &a1, wait); + if (err) + return err; + + for (i = 0; i < ETH_ALEN; i++) + mac_addr[i] = ((u8 *)&a0)[i]; + + return 0; +} + +void vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast, + int broadcast, int promisc, int allmulti) +{ + u64 a0, a1 = 0; + int wait = 1000; + int err; + + a0 = (directed ? CMD_PFILTER_DIRECTED : 0) | + (multicast ? CMD_PFILTER_MULTICAST : 0) | + (broadcast ? CMD_PFILTER_BROADCAST : 0) | + (promisc ? CMD_PFILTER_PROMISCUOUS : 0) | + (allmulti ? 
CMD_PFILTER_ALL_MULTICAST : 0); + + err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER, &a0, &a1, wait); + if (err) + printk(KERN_ERR "Can't set packet filter\n"); +} + +void vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr) +{ + u64 a0 = 0, a1 = 0; + int wait = 1000; + int err; + int i; + + for (i = 0; i < ETH_ALEN; i++) + ((u8 *)&a0)[i] = addr[i]; + + err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait); + if (err) + printk(KERN_ERR + "Can't add addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n", + addr[0], addr[1], addr[2], addr[3], addr[4], addr[5], + err); +} + +void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr) +{ + u64 a0 = 0, a1 = 0; + int wait = 1000; + int err; + int i; + + for (i = 0; i < ETH_ALEN; i++) + ((u8 *)&a0)[i] = addr[i]; + + err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait); + if (err) + printk(KERN_ERR + "Can't del addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n", + addr[0], addr[1], addr[2], addr[3], addr[4], addr[5], + err); +} + +int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr) +{ + u64 a0, a1; + int wait = 1000; + + if (!vdev->notify) { + vdev->notify = pci_alloc_consistent(vdev->pdev, + sizeof(struct vnic_devcmd_notify), + &vdev->notify_pa); + if (!vdev->notify) + return -ENOMEM; + } + + a0 = vdev->notify_pa; + a1 = ((u64)intr << 32) & 0x0000ffff00000000ULL; + a1 += sizeof(struct vnic_devcmd_notify); + + return vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait); +} + +void vnic_dev_notify_unset(struct vnic_dev *vdev) +{ + u64 a0, a1; + int wait = 1000; + + a0 = 0; /* paddr = 0 to unset notify buffer */ + a1 = 0x0000ffff00000000ULL; /* intr num = -1 to unreg for intr */ + a1 += sizeof(struct vnic_devcmd_notify); + + vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait); +} + +static int vnic_dev_notify_ready(struct vnic_dev *vdev) +{ + u32 *words; + unsigned int nwords = sizeof(struct vnic_devcmd_notify) / 4; + unsigned int i; + u32 csum; + + if (!vdev->notify) + return 0; + + do { + csum = 0; + memcpy(&vdev->notify_copy, vdev->notify, + sizeof(struct vnic_devcmd_notify)); + words = (u32 *)&vdev->notify_copy; + for (i = 1; i < nwords; i++) + csum += words[i]; + } while (csum != words[0]); + + return 1; +} + +int vnic_dev_init(struct vnic_dev *vdev, int arg) +{ + u64 a0 = (u32)arg, a1 = 0; + int wait = 1000; + return vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait); +} + +int vnic_dev_link_status(struct vnic_dev *vdev) +{ + if (vdev->linkstatus) + return *vdev->linkstatus; + + if (!vnic_dev_notify_ready(vdev)) + return 0; + + return vdev->notify_copy.link_state; +} + +u32 vnic_dev_port_speed(struct vnic_dev *vdev) +{ + if (!vnic_dev_notify_ready(vdev)) + return 0; + + return vdev->notify_copy.port_speed; +} + +u32 vnic_dev_msg_lvl(struct vnic_dev *vdev) +{ + if (!vnic_dev_notify_ready(vdev)) + return 0; + + return vdev->notify_copy.msglvl; +} + +u32 vnic_dev_mtu(struct vnic_dev *vdev) +{ + if (!vnic_dev_notify_ready(vdev)) + return 0; + + return vdev->notify_copy.mtu; +} + +u32 vnic_dev_link_down_cnt(struct vnic_dev *vdev) +{ + if (!vnic_dev_notify_ready(vdev)) + return 0; + + return vdev->notify_copy.link_down_cnt; +} + +void vnic_dev_set_intr_mode(struct vnic_dev *vdev, + enum vnic_dev_intr_mode intr_mode) +{ + vdev->intr_mode = intr_mode; +} + +enum vnic_dev_intr_mode vnic_dev_get_intr_mode( + struct vnic_dev *vdev) +{ + return vdev->intr_mode; +} + +void vnic_dev_unregister(struct vnic_dev *vdev) +{ + if (vdev) { + if (vdev->notify) + pci_free_consistent(vdev->pdev, + sizeof(struct vnic_devcmd_notify), + vdev->notify, + vdev->notify_pa); + if (vdev->linkstatus) + 
pci_free_consistent(vdev->pdev, + sizeof(u32), + vdev->linkstatus, + vdev->linkstatus_pa); + if (vdev->stats) + pci_free_consistent(vdev->pdev, + sizeof(struct vnic_dev), + vdev->stats, vdev->stats_pa); + if (vdev->fw_info) + pci_free_consistent(vdev->pdev, + sizeof(struct vnic_devcmd_fw_info), + vdev->fw_info, vdev->fw_info_pa); + kfree(vdev); + } +} + +struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev, + void *priv, struct pci_dev *pdev, struct vnic_dev_bar *bar) +{ + if (!vdev) { + vdev = kzalloc(sizeof(struct vnic_dev), GFP_KERNEL); + if (!vdev) + return NULL; + } + + vdev->priv = priv; + vdev->pdev = pdev; + + if (vnic_dev_discover_res(vdev, bar)) + goto err_out; + + vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0); + if (!vdev->devcmd) + goto err_out; + + return vdev; + +err_out: + vnic_dev_unregister(vdev); + return NULL; +} diff --git a/drivers/scsi/fnic/vnic_dev.h b/drivers/scsi/fnic/vnic_dev.h new file mode 100644 index 00000000000..f9935a8a5a0 --- /dev/null +++ b/drivers/scsi/fnic/vnic_dev.h @@ -0,0 +1,161 @@ +/* + * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#ifndef _VNIC_DEV_H_ +#define _VNIC_DEV_H_ + +#include "vnic_resource.h" +#include "vnic_devcmd.h" + +/* + * These defines avoid symbol clash between fnic and enic (Cisco 10G Eth + * Driver) when both are built with CONFIG options =y + */ +#define vnic_dev_priv fnic_dev_priv +#define vnic_dev_get_res_count fnic_dev_get_res_count +#define vnic_dev_get_res fnic_dev_get_res +#define vnic_dev_desc_ring_size fnic_dev_desc_ring_siz +#define vnic_dev_clear_desc_ring fnic_dev_clear_desc_ring +#define vnic_dev_alloc_desc_ring fnic_dev_alloc_desc_ring +#define vnic_dev_free_desc_ring fnic_dev_free_desc_ring +#define vnic_dev_cmd fnic_dev_cmd +#define vnic_dev_fw_info fnic_dev_fw_info +#define vnic_dev_spec fnic_dev_spec +#define vnic_dev_stats_clear fnic_dev_stats_clear +#define vnic_dev_stats_dump fnic_dev_stats_dump +#define vnic_dev_hang_notify fnic_dev_hang_notify +#define vnic_dev_packet_filter fnic_dev_packet_filter +#define vnic_dev_add_addr fnic_dev_add_addr +#define vnic_dev_del_addr fnic_dev_del_addr +#define vnic_dev_mac_addr fnic_dev_mac_addr +#define vnic_dev_notify_set fnic_dev_notify_set +#define vnic_dev_notify_unset fnic_dev_notify_unset +#define vnic_dev_link_status fnic_dev_link_status +#define vnic_dev_port_speed fnic_dev_port_speed +#define vnic_dev_msg_lvl fnic_dev_msg_lvl +#define vnic_dev_mtu fnic_dev_mtu +#define vnic_dev_link_down_cnt fnic_dev_link_down_cnt +#define vnic_dev_close fnic_dev_close +#define vnic_dev_enable fnic_dev_enable +#define vnic_dev_disable fnic_dev_disable +#define vnic_dev_open fnic_dev_open +#define vnic_dev_open_done fnic_dev_open_done +#define vnic_dev_init fnic_dev_init +#define vnic_dev_soft_reset fnic_dev_soft_reset +#define vnic_dev_soft_reset_done fnic_dev_soft_reset_done +#define vnic_dev_set_intr_mode fnic_dev_set_intr_mode +#define vnic_dev_get_intr_mode fnic_dev_get_intr_mode +#define vnic_dev_unregister fnic_dev_unregister +#define vnic_dev_register fnic_dev_register + +#ifndef VNIC_PADDR_TARGET +#define VNIC_PADDR_TARGET 0x0000000000000000ULL +#endif + +#ifndef readq +static inline u64 readq(void __iomem *reg) +{ + return ((u64)readl(reg + 0x4UL) << 32) | (u64)readl(reg); +} + +static inline void writeq(u64 val, void __iomem *reg) +{ + writel(val & 0xffffffff, reg); + writel(val >> 32, reg + 0x4UL); +} +#endif + +enum vnic_dev_intr_mode { + VNIC_DEV_INTR_MODE_UNKNOWN, + VNIC_DEV_INTR_MODE_INTX, + VNIC_DEV_INTR_MODE_MSI, + VNIC_DEV_INTR_MODE_MSIX, +}; + +struct vnic_dev_bar { + void __iomem *vaddr; + dma_addr_t bus_addr; + unsigned long len; +}; + +struct vnic_dev_ring { + void *descs; + size_t size; + dma_addr_t base_addr; + size_t base_align; + void *descs_unaligned; + size_t size_unaligned; + dma_addr_t base_addr_unaligned; + unsigned int desc_size; + unsigned int desc_count; + unsigned int desc_avail; +}; + +struct vnic_dev; +struct vnic_stats; + +void *vnic_dev_priv(struct vnic_dev *vdev); +unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev, + enum vnic_res_type type); +void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type, + unsigned int index); +unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring, + unsigned int desc_count, + unsigned int desc_size); +void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring); +int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring, + unsigned int desc_count, unsigned int desc_size); +void vnic_dev_free_desc_ring(struct vnic_dev *vdev, + struct vnic_dev_ring *ring); +int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd 
cmd, + u64 *a0, u64 *a1, int wait); +int vnic_dev_fw_info(struct vnic_dev *vdev, + struct vnic_devcmd_fw_info **fw_info); +int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, + unsigned int size, void *value); +int vnic_dev_stats_clear(struct vnic_dev *vdev); +int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats); +int vnic_dev_hang_notify(struct vnic_dev *vdev); +void vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast, + int broadcast, int promisc, int allmulti); +void vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr); +void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr); +int vnic_dev_mac_addr(struct vnic_dev *vdev, u8 *mac_addr); +int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr); +void vnic_dev_notify_unset(struct vnic_dev *vdev); +int vnic_dev_link_status(struct vnic_dev *vdev); +u32 vnic_dev_port_speed(struct vnic_dev *vdev); +u32 vnic_dev_msg_lvl(struct vnic_dev *vdev); +u32 vnic_dev_mtu(struct vnic_dev *vdev); +u32 vnic_dev_link_down_cnt(struct vnic_dev *vdev); +int vnic_dev_close(struct vnic_dev *vdev); +int vnic_dev_enable(struct vnic_dev *vdev); +int vnic_dev_disable(struct vnic_dev *vdev); +int vnic_dev_open(struct vnic_dev *vdev, int arg); +int vnic_dev_open_done(struct vnic_dev *vdev, int *done); +int vnic_dev_init(struct vnic_dev *vdev, int arg); +int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg); +int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done); +void vnic_dev_set_intr_mode(struct vnic_dev *vdev, + enum vnic_dev_intr_mode intr_mode); +enum vnic_dev_intr_mode vnic_dev_get_intr_mode(struct vnic_dev *vdev); +void vnic_dev_unregister(struct vnic_dev *vdev); +struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev, + void *priv, struct pci_dev *pdev, + struct vnic_dev_bar *bar); + +#endif /* _VNIC_DEV_H_ */ diff --git a/drivers/scsi/fnic/vnic_devcmd.h b/drivers/scsi/fnic/vnic_devcmd.h new file mode 100644 index 00000000000..d62b9061bf1 --- /dev/null +++ b/drivers/scsi/fnic/vnic_devcmd.h @@ -0,0 +1,281 @@ +/* + * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef _VNIC_DEVCMD_H_ +#define _VNIC_DEVCMD_H_ + +#define _CMD_NBITS 14 +#define _CMD_VTYPEBITS 10 +#define _CMD_FLAGSBITS 6 +#define _CMD_DIRBITS 2 + +#define _CMD_NMASK ((1 << _CMD_NBITS)-1) +#define _CMD_VTYPEMASK ((1 << _CMD_VTYPEBITS)-1) +#define _CMD_FLAGSMASK ((1 << _CMD_FLAGSBITS)-1) +#define _CMD_DIRMASK ((1 << _CMD_DIRBITS)-1) + +#define _CMD_NSHIFT 0 +#define _CMD_VTYPESHIFT (_CMD_NSHIFT+_CMD_NBITS) +#define _CMD_FLAGSSHIFT (_CMD_VTYPESHIFT+_CMD_VTYPEBITS) +#define _CMD_DIRSHIFT (_CMD_FLAGSSHIFT+_CMD_FLAGSBITS) + +/* + * Direction bits (from host perspective). 
+ */ +#define _CMD_DIR_NONE 0U +#define _CMD_DIR_WRITE 1U +#define _CMD_DIR_READ 2U +#define _CMD_DIR_RW (_CMD_DIR_WRITE | _CMD_DIR_READ) + +/* + * Flag bits. + */ +#define _CMD_FLAGS_NONE 0U +#define _CMD_FLAGS_NOWAIT 1U + +/* + * vNIC type bits. + */ +#define _CMD_VTYPE_NONE 0U +#define _CMD_VTYPE_ENET 1U +#define _CMD_VTYPE_FC 2U +#define _CMD_VTYPE_SCSI 4U +#define _CMD_VTYPE_ALL (_CMD_VTYPE_ENET | _CMD_VTYPE_FC | _CMD_VTYPE_SCSI) + +/* + * Used to create cmds.. +*/ +#define _CMDCF(dir, flags, vtype, nr) \ + (((dir) << _CMD_DIRSHIFT) | \ + ((flags) << _CMD_FLAGSSHIFT) | \ + ((vtype) << _CMD_VTYPESHIFT) | \ + ((nr) << _CMD_NSHIFT)) +#define _CMDC(dir, vtype, nr) _CMDCF(dir, 0, vtype, nr) +#define _CMDCNW(dir, vtype, nr) _CMDCF(dir, _CMD_FLAGS_NOWAIT, vtype, nr) + +/* + * Used to decode cmds.. +*/ +#define _CMD_DIR(cmd) (((cmd) >> _CMD_DIRSHIFT) & _CMD_DIRMASK) +#define _CMD_FLAGS(cmd) (((cmd) >> _CMD_FLAGSSHIFT) & _CMD_FLAGSMASK) +#define _CMD_VTYPE(cmd) (((cmd) >> _CMD_VTYPESHIFT) & _CMD_VTYPEMASK) +#define _CMD_N(cmd) (((cmd) >> _CMD_NSHIFT) & _CMD_NMASK) + +enum vnic_devcmd_cmd { + CMD_NONE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_NONE, 0), + + /* mcpu fw info in mem: (u64)a0=paddr to struct vnic_devcmd_fw_info */ + CMD_MCPU_FW_INFO = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 1), + + /* dev-specific block member: + * in: (u16)a0=offset,(u8)a1=size + * out: a0=value */ + CMD_DEV_SPEC = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 2), + + /* stats clear */ + CMD_STATS_CLEAR = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 3), + + /* stats dump in mem: (u64)a0=paddr to stats area, + * (u16)a1=sizeof stats area */ + CMD_STATS_DUMP = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 4), + + /* set Rx packet filter: (u32)a0=filters (see CMD_PFILTER_*) */ + CMD_PACKET_FILTER = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 7), + + /* hang detection notification */ + CMD_HANG_NOTIFY = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 8), + + /* MAC address in (u48)a0 */ + CMD_MAC_ADDR = _CMDC(_CMD_DIR_READ, + _CMD_VTYPE_ENET | _CMD_VTYPE_FC, 9), + + /* disable/enable promisc mode: (u8)a0=0/1 */ +/***** XXX DEPRECATED *****/ + CMD_PROMISC_MODE = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 10), + + /* disable/enable all-multi mode: (u8)a0=0/1 */ +/***** XXX DEPRECATED *****/ + CMD_ALLMULTI_MODE = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 11), + + /* add addr from (u48)a0 */ + CMD_ADDR_ADD = _CMDCNW(_CMD_DIR_WRITE, + _CMD_VTYPE_ENET | _CMD_VTYPE_FC, 12), + + /* del addr from (u48)a0 */ + CMD_ADDR_DEL = _CMDCNW(_CMD_DIR_WRITE, + _CMD_VTYPE_ENET | _CMD_VTYPE_FC, 13), + + /* add VLAN id in (u16)a0 */ + CMD_VLAN_ADD = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 14), + + /* del VLAN id in (u16)a0 */ + CMD_VLAN_DEL = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 15), + + /* nic_cfg in (u32)a0 */ + CMD_NIC_CFG = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 16), + + /* union vnic_rss_key in mem: (u64)a0=paddr, (u16)a1=len */ + CMD_RSS_KEY = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 17), + + /* union vnic_rss_cpu in mem: (u64)a0=paddr, (u16)a1=len */ + CMD_RSS_CPU = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 18), + + /* initiate softreset */ + CMD_SOFT_RESET = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 19), + + /* softreset status: + * out: a0=0 reset complete, a0=1 reset in progress */ + CMD_SOFT_RESET_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 20), + + /* set struct vnic_devcmd_notify buffer in mem: + * in: + * (u64)a0=paddr to notify (set paddr=0 to unset) + * (u32)a1 & 0x00000000ffffffff=sizeof(struct vnic_devcmd_notify) + * (u16)a1 & 0x0000ffff00000000=intr num (-1 for no intr) + * 
out: + * (u32)a1 = effective size + */ + CMD_NOTIFY = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 21), + + /* UNDI API: (u64)a0=paddr to s_PXENV_UNDI_ struct, + * (u8)a1=PXENV_UNDI_xxx */ + CMD_UNDI = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 22), + + /* initiate open sequence (u32)a0=flags (see CMD_OPENF_*) */ + CMD_OPEN = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 23), + + /* open status: + * out: a0=0 open complete, a0=1 open in progress */ + CMD_OPEN_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 24), + + /* close vnic */ + CMD_CLOSE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 25), + + /* initialize virtual link: (u32)a0=flags (see CMD_INITF_*) */ + CMD_INIT = _CMDCNW(_CMD_DIR_READ, _CMD_VTYPE_ALL, 26), + + /* variant of CMD_INIT, with provisioning info + * (u64)a0=paddr of vnic_devcmd_provinfo + * (u32)a1=sizeof provision info */ + CMD_INIT_PROV_INFO = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 27), + + /* enable virtual link */ + CMD_ENABLE = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 28), + + /* disable virtual link */ + CMD_DISABLE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 29), + + /* stats dump all vnics on uplink in mem: (u64)a0=paddr (u32)a1=uif */ + CMD_STATS_DUMP_ALL = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 30), + + /* init status: + * out: a0=0 init complete, a0=1 init in progress + * if a0=0, a1=errno */ + CMD_INIT_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 31), + + /* INT13 API: (u64)a0=paddr to vnic_int13_params struct + * (u8)a1=INT13_CMD_xxx */ + CMD_INT13 = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_FC, 32), + + /* logical uplink enable/disable: (u64)a0: 0/1=disable/enable */ + CMD_LOGICAL_UPLINK = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 33), + + /* undo initialize of virtual link */ + CMD_DEINIT = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 34), +}; + +/* flags for CMD_OPEN */ +#define CMD_OPENF_OPROM 0x1 /* open coming from option rom */ + +/* flags for CMD_INIT */ +#define CMD_INITF_DEFAULT_MAC 0x1 /* init with default mac addr */ + +/* flags for CMD_PACKET_FILTER */ +#define CMD_PFILTER_DIRECTED 0x01 +#define CMD_PFILTER_MULTICAST 0x02 +#define CMD_PFILTER_BROADCAST 0x04 +#define CMD_PFILTER_PROMISCUOUS 0x08 +#define CMD_PFILTER_ALL_MULTICAST 0x10 + +enum vnic_devcmd_status { + STAT_NONE = 0, + STAT_BUSY = 1 << 0, /* cmd in progress */ + STAT_ERROR = 1 << 1, /* last cmd caused error (code in a0) */ +}; + +enum vnic_devcmd_error { + ERR_SUCCESS = 0, + ERR_EINVAL = 1, + ERR_EFAULT = 2, + ERR_EPERM = 3, + ERR_EBUSY = 4, + ERR_ECMDUNKNOWN = 5, + ERR_EBADSTATE = 6, + ERR_ENOMEM = 7, + ERR_ETIMEDOUT = 8, + ERR_ELINKDOWN = 9, +}; + +struct vnic_devcmd_fw_info { + char fw_version[32]; + char fw_build[32]; + char hw_version[32]; + char hw_serial_number[32]; +}; + +struct vnic_devcmd_notify { + u32 csum; /* checksum over following words */ + + u32 link_state; /* link up == 1 */ + u32 port_speed; /* effective port speed (rate limit) */ + u32 mtu; /* MTU */ + u32 msglvl; /* requested driver msg lvl */ + u32 uif; /* uplink interface */ + u32 status; /* status bits (see VNIC_STF_*) */ + u32 error; /* error code (see ERR_*) for first ERR */ + u32 link_down_cnt; /* running count of link down transitions */ +}; +#define VNIC_STF_FATAL_ERR 0x0001 /* fatal fw error */ + +struct vnic_devcmd_provinfo { + u8 oui[3]; + u8 type; + u8 data[0]; +}; + +/* + * Writing cmd register causes STAT_BUSY to get set in status register. + * When cmd completes, STAT_BUSY will be cleared. + * + * If cmd completed successfully STAT_ERROR will be clear + * and args registers contain cmd-specific results. 
+ * + * If cmd error, STAT_ERROR will be set and args[0] contains error code. + * + * status register is read-only. While STAT_BUSY is set, + * all other register contents are read-only. + */ + +/* Make sizeof(vnic_devcmd) a power-of-2 for I/O BAR. */ +#define VNIC_DEVCMD_NARGS 15 +struct vnic_devcmd { + u32 status; /* RO */ + u32 cmd; /* RW */ + u64 args[VNIC_DEVCMD_NARGS]; /* RW cmd args (little-endian) */ +}; + +#endif /* _VNIC_DEVCMD_H_ */ diff --git a/drivers/scsi/fnic/vnic_intr.c b/drivers/scsi/fnic/vnic_intr.c new file mode 100644 index 00000000000..4f4dc8793d2 --- /dev/null +++ b/drivers/scsi/fnic/vnic_intr.c @@ -0,0 +1,60 @@ +/* + * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <linux/kernel.h> +#include <linux/errno.h> +#include <linux/types.h> +#include <linux/pci.h> +#include <linux/delay.h> +#include "vnic_dev.h" +#include "vnic_intr.h" + +void vnic_intr_free(struct vnic_intr *intr) +{ + intr->ctrl = NULL; +} + +int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr, + unsigned int index) +{ + intr->index = index; + intr->vdev = vdev; + + intr->ctrl = vnic_dev_get_res(vdev, RES_TYPE_INTR_CTRL, index); + if (!intr->ctrl) { + printk(KERN_ERR "Failed to hook INTR[%d].ctrl resource\n", + index); + return -EINVAL; + } + + return 0; +} + +void vnic_intr_init(struct vnic_intr *intr, unsigned int coalescing_timer, + unsigned int coalescing_type, unsigned int mask_on_assertion) +{ + iowrite32(coalescing_timer, &intr->ctrl->coalescing_timer); + iowrite32(coalescing_type, &intr->ctrl->coalescing_type); + iowrite32(mask_on_assertion, &intr->ctrl->mask_on_assertion); + iowrite32(0, &intr->ctrl->int_credits); +} + +void vnic_intr_clean(struct vnic_intr *intr) +{ + iowrite32(0, &intr->ctrl->int_credits); +} diff --git a/drivers/scsi/fnic/vnic_intr.h b/drivers/scsi/fnic/vnic_intr.h new file mode 100644 index 00000000000..d5fb40e7c98 --- /dev/null +++ b/drivers/scsi/fnic/vnic_intr.h @@ -0,0 +1,118 @@ +/* + * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE.
+ */ +#ifndef _VNIC_INTR_H_ +#define _VNIC_INTR_H_ + +#include <linux/pci.h> +#include "vnic_dev.h" + +/* + * These defines avoid symbol clash between fnic and enic (Cisco 10G Eth + * Driver) when both are built with CONFIG options =y + */ +#define vnic_intr_unmask fnic_intr_unmask +#define vnic_intr_mask fnic_intr_mask +#define vnic_intr_return_credits fnic_intr_return_credits +#define vnic_intr_credits fnic_intr_credits +#define vnic_intr_return_all_credits fnic_intr_return_all_credits +#define vnic_intr_legacy_pba fnic_intr_legacy_pba +#define vnic_intr_free fnic_intr_free +#define vnic_intr_alloc fnic_intr_alloc +#define vnic_intr_init fnic_intr_init +#define vnic_intr_clean fnic_intr_clean + +#define VNIC_INTR_TIMER_MAX 0xffff + +#define VNIC_INTR_TIMER_TYPE_ABS 0 +#define VNIC_INTR_TIMER_TYPE_QUIET 1 + +/* Interrupt control */ +struct vnic_intr_ctrl { + u32 coalescing_timer; /* 0x00 */ + u32 pad0; + u32 coalescing_value; /* 0x08 */ + u32 pad1; + u32 coalescing_type; /* 0x10 */ + u32 pad2; + u32 mask_on_assertion; /* 0x18 */ + u32 pad3; + u32 mask; /* 0x20 */ + u32 pad4; + u32 int_credits; /* 0x28 */ + u32 pad5; + u32 int_credit_return; /* 0x30 */ + u32 pad6; +}; + +struct vnic_intr { + unsigned int index; + struct vnic_dev *vdev; + struct vnic_intr_ctrl __iomem *ctrl; /* memory-mapped */ +}; + +static inline void vnic_intr_unmask(struct vnic_intr *intr) +{ + iowrite32(0, &intr->ctrl->mask); +} + +static inline void vnic_intr_mask(struct vnic_intr *intr) +{ + iowrite32(1, &intr->ctrl->mask); +} + +static inline void vnic_intr_return_credits(struct vnic_intr *intr, + unsigned int credits, int unmask, int reset_timer) +{ +#define VNIC_INTR_UNMASK_SHIFT 16 +#define VNIC_INTR_RESET_TIMER_SHIFT 17 + + u32 int_credit_return = (credits & 0xffff) | + (unmask ? (1 << VNIC_INTR_UNMASK_SHIFT) : 0) | + (reset_timer ? (1 << VNIC_INTR_RESET_TIMER_SHIFT) : 0); + + iowrite32(int_credit_return, &intr->ctrl->int_credit_return); +} + +static inline unsigned int vnic_intr_credits(struct vnic_intr *intr) +{ + return ioread32(&intr->ctrl->int_credits); +} + +static inline void vnic_intr_return_all_credits(struct vnic_intr *intr) +{ + unsigned int credits = vnic_intr_credits(intr); + int unmask = 1; + int reset_timer = 1; + + vnic_intr_return_credits(intr, credits, unmask, reset_timer); +} + +static inline u32 vnic_intr_legacy_pba(u32 __iomem *legacy_pba) +{ + /* read PBA without clearing */ + return ioread32(legacy_pba); +} + +void vnic_intr_free(struct vnic_intr *intr); +int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr, + unsigned int index); +void vnic_intr_init(struct vnic_intr *intr, unsigned int coalescing_timer, + unsigned int coalescing_type, unsigned int mask_on_assertion); +void vnic_intr_clean(struct vnic_intr *intr); + +#endif /* _VNIC_INTR_H_ */ diff --git a/drivers/scsi/fnic/vnic_nic.h b/drivers/scsi/fnic/vnic_nic.h new file mode 100644 index 00000000000..f15b83eeace --- /dev/null +++ b/drivers/scsi/fnic/vnic_nic.h @@ -0,0 +1,69 @@ +/* + * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef _VNIC_NIC_H_ +#define _VNIC_NIC_H_ + +/* + * These defines avoid symbol clash between fnic and enic (Cisco 10G Eth + * Driver) when both are built with CONFIG options =y + */ +#define vnic_set_nic_cfg fnic_set_nic_cfg + +#define NIC_CFG_RSS_DEFAULT_CPU_MASK_FIELD 0xffUL +#define NIC_CFG_RSS_DEFAULT_CPU_SHIFT 0 +#define NIC_CFG_RSS_HASH_TYPE (0xffUL << 8) +#define NIC_CFG_RSS_HASH_TYPE_MASK_FIELD 0xffUL +#define NIC_CFG_RSS_HASH_TYPE_SHIFT 8 +#define NIC_CFG_RSS_HASH_BITS (7UL << 16) +#define NIC_CFG_RSS_HASH_BITS_MASK_FIELD 7UL +#define NIC_CFG_RSS_HASH_BITS_SHIFT 16 +#define NIC_CFG_RSS_BASE_CPU (7UL << 19) +#define NIC_CFG_RSS_BASE_CPU_MASK_FIELD 7UL +#define NIC_CFG_RSS_BASE_CPU_SHIFT 19 +#define NIC_CFG_RSS_ENABLE (1UL << 22) +#define NIC_CFG_RSS_ENABLE_MASK_FIELD 1UL +#define NIC_CFG_RSS_ENABLE_SHIFT 22 +#define NIC_CFG_TSO_IPID_SPLIT_EN (1UL << 23) +#define NIC_CFG_TSO_IPID_SPLIT_EN_MASK_FIELD 1UL +#define NIC_CFG_TSO_IPID_SPLIT_EN_SHIFT 23 +#define NIC_CFG_IG_VLAN_STRIP_EN (1UL << 24) +#define NIC_CFG_IG_VLAN_STRIP_EN_MASK_FIELD 1UL +#define NIC_CFG_IG_VLAN_STRIP_EN_SHIFT 24 + +static inline void vnic_set_nic_cfg(u32 *nic_cfg, + u8 rss_default_cpu, u8 rss_hash_type, + u8 rss_hash_bits, u8 rss_base_cpu, + u8 rss_enable, u8 tso_ipid_split_en, + u8 ig_vlan_strip_en) +{ + *nic_cfg = (rss_default_cpu & NIC_CFG_RSS_DEFAULT_CPU_MASK_FIELD) | + ((rss_hash_type & NIC_CFG_RSS_HASH_TYPE_MASK_FIELD) + << NIC_CFG_RSS_HASH_TYPE_SHIFT) | + ((rss_hash_bits & NIC_CFG_RSS_HASH_BITS_MASK_FIELD) + << NIC_CFG_RSS_HASH_BITS_SHIFT) | + ((rss_base_cpu & NIC_CFG_RSS_BASE_CPU_MASK_FIELD) + << NIC_CFG_RSS_BASE_CPU_SHIFT) | + ((rss_enable & NIC_CFG_RSS_ENABLE_MASK_FIELD) + << NIC_CFG_RSS_ENABLE_SHIFT) | + ((tso_ipid_split_en & NIC_CFG_TSO_IPID_SPLIT_EN_MASK_FIELD) + << NIC_CFG_TSO_IPID_SPLIT_EN_SHIFT) | + ((ig_vlan_strip_en & NIC_CFG_IG_VLAN_STRIP_EN_MASK_FIELD) + << NIC_CFG_IG_VLAN_STRIP_EN_SHIFT); +} + +#endif /* _VNIC_NIC_H_ */ diff --git a/drivers/scsi/fnic/vnic_resource.h b/drivers/scsi/fnic/vnic_resource.h new file mode 100644 index 00000000000..2d842f79d41 --- /dev/null +++ b/drivers/scsi/fnic/vnic_resource.h @@ -0,0 +1,61 @@ +/* + * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#ifndef _VNIC_RESOURCE_H_ +#define _VNIC_RESOURCE_H_ + +#define VNIC_RES_MAGIC 0x766E6963L /* 'vnic' */ +#define VNIC_RES_VERSION 0x00000000L + +/* vNIC resource types */ +enum vnic_res_type { + RES_TYPE_EOL, /* End-of-list */ + RES_TYPE_WQ, /* Work queues */ + RES_TYPE_RQ, /* Receive queues */ + RES_TYPE_CQ, /* Completion queues */ + RES_TYPE_RSVD1, + RES_TYPE_NIC_CFG, /* Enet NIC config registers */ + RES_TYPE_RSVD2, + RES_TYPE_RSVD3, + RES_TYPE_RSVD4, + RES_TYPE_RSVD5, + RES_TYPE_INTR_CTRL, /* Interrupt ctrl table */ + RES_TYPE_INTR_TABLE, /* MSI/MSI-X Interrupt table */ + RES_TYPE_INTR_PBA, /* MSI/MSI-X PBA table */ + RES_TYPE_INTR_PBA_LEGACY, /* Legacy intr status */ + RES_TYPE_RSVD6, + RES_TYPE_RSVD7, + RES_TYPE_DEVCMD, /* Device command region */ + RES_TYPE_PASS_THRU_PAGE, /* Pass-thru page */ + + RES_TYPE_MAX, /* Count of resource types */ +}; + +struct vnic_resource_header { + u32 magic; + u32 version; +}; + +struct vnic_resource { + u8 type; + u8 bar; + u8 pad[2]; + u32 bar_offset; + u32 count; +}; + +#endif /* _VNIC_RESOURCE_H_ */ diff --git a/drivers/scsi/fnic/vnic_rq.c b/drivers/scsi/fnic/vnic_rq.c new file mode 100644 index 00000000000..bedd0d28563 --- /dev/null +++ b/drivers/scsi/fnic/vnic_rq.c @@ -0,0 +1,196 @@ +/* + * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include <linux/errno.h> +#include <linux/types.h> +#include <linux/pci.h> +#include <linux/delay.h> +#include "vnic_dev.h" +#include "vnic_rq.h" + +static int vnic_rq_alloc_bufs(struct vnic_rq *rq) +{ + struct vnic_rq_buf *buf; + struct vnic_dev *vdev; + unsigned int i, j, count = rq->ring.desc_count; + unsigned int blks = VNIC_RQ_BUF_BLKS_NEEDED(count); + + vdev = rq->vdev; + + for (i = 0; i < blks; i++) { + rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ, GFP_ATOMIC); + if (!rq->bufs[i]) { + printk(KERN_ERR "Failed to alloc rq_bufs\n"); + return -ENOMEM; + } + } + + for (i = 0; i < blks; i++) { + buf = rq->bufs[i]; + for (j = 0; j < VNIC_RQ_BUF_BLK_ENTRIES; j++) { + buf->index = i * VNIC_RQ_BUF_BLK_ENTRIES + j; + buf->desc = (u8 *)rq->ring.descs + + rq->ring.desc_size * buf->index; + if (buf->index + 1 == count) { + buf->next = rq->bufs[0]; + break; + } else if (j + 1 == VNIC_RQ_BUF_BLK_ENTRIES) { + buf->next = rq->bufs[i + 1]; + } else { + buf->next = buf + 1; + buf++; + } + } + } + + rq->to_use = rq->to_clean = rq->bufs[0]; + rq->buf_index = 0; + + return 0; +} + +void vnic_rq_free(struct vnic_rq *rq) +{ + struct vnic_dev *vdev; + unsigned int i; + + vdev = rq->vdev; + + vnic_dev_free_desc_ring(vdev, &rq->ring); + + for (i = 0; i < VNIC_RQ_BUF_BLKS_MAX; i++) { + kfree(rq->bufs[i]); + rq->bufs[i] = NULL; + } + + rq->ctrl = NULL; +} + +int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index, + unsigned int desc_count, unsigned int desc_size) +{ + int err; + + rq->index = index; + rq->vdev = vdev; + + rq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_RQ, index); + if (!rq->ctrl) { + printk(KERN_ERR "Failed to hook RQ[%d] resource\n", index); + return -EINVAL; + } + + vnic_rq_disable(rq); + + err = vnic_dev_alloc_desc_ring(vdev, &rq->ring, desc_count, desc_size); + if (err) + return err; + + err = vnic_rq_alloc_bufs(rq); + if (err) { + vnic_rq_free(rq); + return err; + } + + return 0; +} + +void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index, + unsigned int error_interrupt_enable, + unsigned int error_interrupt_offset) +{ + u64 paddr; + u32 fetch_index; + + paddr = (u64)rq->ring.base_addr | VNIC_PADDR_TARGET; + writeq(paddr, &rq->ctrl->ring_base); + iowrite32(rq->ring.desc_count, &rq->ctrl->ring_size); + iowrite32(cq_index, &rq->ctrl->cq_index); + iowrite32(error_interrupt_enable, &rq->ctrl->error_interrupt_enable); + iowrite32(error_interrupt_offset, &rq->ctrl->error_interrupt_offset); + iowrite32(0, &rq->ctrl->dropped_packet_count); + iowrite32(0, &rq->ctrl->error_status); + + /* Use current fetch_index as the ring starting point */ + fetch_index = ioread32(&rq->ctrl->fetch_index); + rq->to_use = rq->to_clean = + &rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES] + [fetch_index % VNIC_RQ_BUF_BLK_ENTRIES]; + iowrite32(fetch_index, &rq->ctrl->posted_index); + + rq->buf_index = 0; +} + +unsigned int vnic_rq_error_status(struct vnic_rq *rq) +{ + return ioread32(&rq->ctrl->error_status); +} + +void vnic_rq_enable(struct vnic_rq *rq) +{ + iowrite32(1, &rq->ctrl->enable); +} + +int vnic_rq_disable(struct vnic_rq *rq) +{ + unsigned int wait; + + iowrite32(0, &rq->ctrl->enable); + + /* Wait for HW to ACK disable request */ + for (wait = 0; wait < 100; wait++) { + if (!(ioread32(&rq->ctrl->running))) + return 0; + udelay(1); + } + + printk(KERN_ERR "Failed to disable RQ[%d]\n", rq->index); + + return -ETIMEDOUT; +} + +void vnic_rq_clean(struct vnic_rq *rq, + void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf)) +{ + struct vnic_rq_buf *buf; + u32 fetch_index; + + BUG_ON(ioread32(&rq->ctrl->enable)); + + buf = rq->to_clean;
+ + while (vnic_rq_desc_used(rq) > 0) { + + (*buf_clean)(rq, buf); + + buf = rq->to_clean = buf->next; + rq->ring.desc_avail++; + } + + /* Use current fetch_index as the ring starting point */ + fetch_index = ioread32(&rq->ctrl->fetch_index); + rq->to_use = rq->to_clean = + &rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES] + [fetch_index % VNIC_RQ_BUF_BLK_ENTRIES]; + iowrite32(fetch_index, &rq->ctrl->posted_index); + + rq->buf_index = 0; + + vnic_dev_clear_desc_ring(&rq->ring); +} + diff --git a/drivers/scsi/fnic/vnic_rq.h b/drivers/scsi/fnic/vnic_rq.h new file mode 100644 index 00000000000..aebdfbd6ad3 --- /dev/null +++ b/drivers/scsi/fnic/vnic_rq.h @@ -0,0 +1,235 @@ +/* + * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef _VNIC_RQ_H_ +#define _VNIC_RQ_H_ + +#include <linux/pci.h> +#include "vnic_dev.h" +#include "vnic_cq.h" + +/* + * These defines avoid symbol clash between fnic and enic (Cisco 10G Eth + * Driver) when both are built with CONFIG options =y + */ +#define vnic_rq_desc_avail fnic_rq_desc_avail +#define vnic_rq_desc_used fnic_rq_desc_used +#define vnic_rq_next_desc fnic_rq_next_desc +#define vnic_rq_next_index fnic_rq_next_index +#define vnic_rq_next_buf_index fnic_rq_next_buf_index +#define vnic_rq_post fnic_rq_post +#define vnic_rq_posting_soon fnic_rq_posting_soon +#define vnic_rq_return_descs fnic_rq_return_descs +#define vnic_rq_service fnic_rq_service +#define vnic_rq_fill fnic_rq_fill +#define vnic_rq_free fnic_rq_free +#define vnic_rq_alloc fnic_rq_alloc +#define vnic_rq_init fnic_rq_init +#define vnic_rq_error_status fnic_rq_error_status +#define vnic_rq_enable fnic_rq_enable +#define vnic_rq_disable fnic_rq_disable +#define vnic_rq_clean fnic_rq_clean + +/* Receive queue control */ +struct vnic_rq_ctrl { + u64 ring_base; /* 0x00 */ + u32 ring_size; /* 0x08 */ + u32 pad0; + u32 posted_index; /* 0x10 */ + u32 pad1; + u32 cq_index; /* 0x18 */ + u32 pad2; + u32 enable; /* 0x20 */ + u32 pad3; + u32 running; /* 0x28 */ + u32 pad4; + u32 fetch_index; /* 0x30 */ + u32 pad5; + u32 error_interrupt_enable; /* 0x38 */ + u32 pad6; + u32 error_interrupt_offset; /* 0x40 */ + u32 pad7; + u32 error_status; /* 0x48 */ + u32 pad8; + u32 dropped_packet_count; /* 0x50 */ + u32 pad9; + u32 dropped_packet_count_rc; /* 0x58 */ + u32 pad10; +}; + +/* Break the vnic_rq_buf allocations into blocks of 64 entries */ +#define VNIC_RQ_BUF_BLK_ENTRIES 64 +#define VNIC_RQ_BUF_BLK_SZ \ + (VNIC_RQ_BUF_BLK_ENTRIES * sizeof(struct vnic_rq_buf)) +#define VNIC_RQ_BUF_BLKS_NEEDED(entries) \ + DIV_ROUND_UP(entries, VNIC_RQ_BUF_BLK_ENTRIES) +#define VNIC_RQ_BUF_BLKS_MAX VNIC_RQ_BUF_BLKS_NEEDED(4096) + +struct vnic_rq_buf { + struct vnic_rq_buf *next; + dma_addr_t dma_addr; + void *os_buf; + unsigned int os_buf_index; + unsigned int len; + unsigned
int index; + void *desc; +}; + +struct vnic_rq { + unsigned int index; + struct vnic_dev *vdev; + struct vnic_rq_ctrl __iomem *ctrl; /* memory-mapped */ + struct vnic_dev_ring ring; + struct vnic_rq_buf *bufs[VNIC_RQ_BUF_BLKS_MAX]; + struct vnic_rq_buf *to_use; + struct vnic_rq_buf *to_clean; + void *os_buf_head; + unsigned int buf_index; + unsigned int pkts_outstanding; +}; + +static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq) +{ + /* how many does SW own? */ + return rq->ring.desc_avail; +} + +static inline unsigned int vnic_rq_desc_used(struct vnic_rq *rq) +{ + /* how many does HW own? */ + return rq->ring.desc_count - rq->ring.desc_avail - 1; +} + +static inline void *vnic_rq_next_desc(struct vnic_rq *rq) +{ + return rq->to_use->desc; +} + +static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq) +{ + return rq->to_use->index; +} + +static inline unsigned int vnic_rq_next_buf_index(struct vnic_rq *rq) +{ + return rq->buf_index++; +} + +static inline void vnic_rq_post(struct vnic_rq *rq, + void *os_buf, unsigned int os_buf_index, + dma_addr_t dma_addr, unsigned int len) +{ + struct vnic_rq_buf *buf = rq->to_use; + + buf->os_buf = os_buf; + buf->os_buf_index = os_buf_index; + buf->dma_addr = dma_addr; + buf->len = len; + + buf = buf->next; + rq->to_use = buf; + rq->ring.desc_avail--; + + /* Move the posted_index every nth descriptor + */ + +#ifndef VNIC_RQ_RETURN_RATE +#define VNIC_RQ_RETURN_RATE 0xf /* keep 2^n - 1 */ +#endif + + if ((buf->index & VNIC_RQ_RETURN_RATE) == 0) { + /* Adding write memory barrier prevents compiler and/or CPU + * reordering, thus avoiding descriptor posting before + * descriptor is initialized. Otherwise, hardware can read + * stale descriptor fields. + */ + wmb(); + iowrite32(buf->index, &rq->ctrl->posted_index); + } +} + +static inline int vnic_rq_posting_soon(struct vnic_rq *rq) +{ + return (rq->to_use->index & VNIC_RQ_RETURN_RATE) == 0; +} + +static inline void vnic_rq_return_descs(struct vnic_rq *rq, unsigned int count) +{ + rq->ring.desc_avail += count; +} + +enum desc_return_options { + VNIC_RQ_RETURN_DESC, + VNIC_RQ_DEFER_RETURN_DESC, +}; + +static inline void vnic_rq_service(struct vnic_rq *rq, + struct cq_desc *cq_desc, u16 completed_index, + int desc_return, void (*buf_service)(struct vnic_rq *rq, + struct cq_desc *cq_desc, struct vnic_rq_buf *buf, + int skipped, void *opaque), void *opaque) +{ + struct vnic_rq_buf *buf; + int skipped; + + buf = rq->to_clean; + while (1) { + + skipped = (buf->index != completed_index); + + (*buf_service)(rq, cq_desc, buf, skipped, opaque); + + if (desc_return == VNIC_RQ_RETURN_DESC) + rq->ring.desc_avail++; + + rq->to_clean = buf->next; + + if (!skipped) + break; + + buf = rq->to_clean; + } +} + +static inline int vnic_rq_fill(struct vnic_rq *rq, + int (*buf_fill)(struct vnic_rq *rq)) +{ + int err; + + while (vnic_rq_desc_avail(rq) > 1) { + + err = (*buf_fill)(rq); + if (err) + return err; + } + + return 0; +} + +void vnic_rq_free(struct vnic_rq *rq); +int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index, + unsigned int desc_count, unsigned int desc_size); +void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index, + unsigned int error_interrupt_enable, + unsigned int error_interrupt_offset); +unsigned int vnic_rq_error_status(struct vnic_rq *rq); +void vnic_rq_enable(struct vnic_rq *rq); +int vnic_rq_disable(struct vnic_rq *rq); +void vnic_rq_clean(struct vnic_rq *rq, + void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf)); + +#endif /* _VNIC_RQ_H_ */ 
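
The receive-queue helpers above encode two conventions worth spelling out: software never hands hardware the whole ring (vnic_rq_desc_used() is desc_count - desc_avail - 1, so one slot always stays unused to keep "full" and "empty" distinguishable), and the posted_index doorbell is only written when the next buffer index crosses a multiple of VNIC_RQ_RETURN_RATE + 1, which batches MMIO writes at the cost of hardware briefly lagging software. The sketch below is a minimal stand-alone, user-space simulation of that index arithmetic only; it is not part of the patch, and the sim_* names, SIM_DESC_COUNT and the printf harness are invented for this illustration.

/*
 * Stand-alone sketch (not kernel code): models only the vnic_rq index
 * arithmetic from vnic_rq.h above.  The sim_* names, SIM_DESC_COUNT and
 * the printf harness are invented for this illustration.
 */
#include <stdio.h>

#define SIM_DESC_COUNT	64	/* ring size chosen for the example */
#define SIM_RETURN_RATE	0xf	/* mirrors VNIC_RQ_RETURN_RATE (2^n - 1) */

struct sim_rq {
	unsigned int desc_count;
	unsigned int desc_avail;	/* descriptors software still owns */
	unsigned int to_use;		/* index of the next buffer to post */
	unsigned int posted_index;	/* last doorbell value "seen" by HW */
	unsigned int doorbells;		/* number of doorbell writes issued */
};

static void sim_rq_init(struct sim_rq *rq, unsigned int count)
{
	rq->desc_count = count;
	rq->desc_avail = count - 1;	/* one slot is always left unused */
	rq->to_use = 0;
	rq->posted_index = 0;
	rq->doorbells = 0;
}

/* mirrors vnic_rq_desc_used(): how many descriptors hardware owns */
static unsigned int sim_rq_desc_used(struct sim_rq *rq)
{
	return rq->desc_count - rq->desc_avail - 1;
}

/* mirrors the tail of vnic_rq_post(): advance first, ring the doorbell lazily */
static void sim_rq_post(struct sim_rq *rq)
{
	rq->to_use = (rq->to_use + 1) % rq->desc_count;
	rq->desc_avail--;

	if ((rq->to_use & SIM_RETURN_RATE) == 0) {
		/* in the driver: wmb(); iowrite32(index, &ctrl->posted_index); */
		rq->posted_index = rq->to_use;
		rq->doorbells++;
	}
}

int main(void)
{
	struct sim_rq rq;
	unsigned int posted;

	sim_rq_init(&rq, SIM_DESC_COUNT);

	/* fill the way vnic_rq_fill() would: stop when desc_avail reaches 1 */
	for (posted = 0; rq.desc_avail > 1; posted++)
		sim_rq_post(&rq);

	printf("posted %u descriptors, hardware owns %u, doorbell written %u times\n",
	       posted, sim_rq_desc_used(&rq), rq.doorbells);
	printf("posted_index left at %u (a multiple of %u)\n",
	       rq.posted_index, SIM_RETURN_RATE + 1);
	return 0;
}

Compiled on its own, the sketch reports 62 posted descriptors for a 64-entry ring but only three doorbell writes, which is the batching behaviour the "keep 2^n - 1" comment on VNIC_RQ_RETURN_RATE is aiming for.
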
diff --git a/drivers/scsi/fnic/vnic_scsi.h b/drivers/scsi/fnic/vnic_scsi.h new file mode 100644 index 00000000000..46baa525400 --- /dev/null +++ b/drivers/scsi/fnic/vnic_scsi.h @@ -0,0 +1,99 @@ +/* + * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef _VNIC_SCSI_H_ +#define _VNIC_SCSI_H_ + +#define VNIC_FNIC_WQ_COPY_COUNT_MIN 1 +#define VNIC_FNIC_WQ_COPY_COUNT_MAX 1 + +#define VNIC_FNIC_WQ_DESCS_MIN 64 +#define VNIC_FNIC_WQ_DESCS_MAX 128 + +#define VNIC_FNIC_WQ_COPY_DESCS_MIN 64 +#define VNIC_FNIC_WQ_COPY_DESCS_MAX 512 + +#define VNIC_FNIC_RQ_DESCS_MIN 64 +#define VNIC_FNIC_RQ_DESCS_MAX 128 + +#define VNIC_FNIC_EDTOV_MIN 1000 +#define VNIC_FNIC_EDTOV_MAX 255000 +#define VNIC_FNIC_EDTOV_DEF 2000 + +#define VNIC_FNIC_RATOV_MIN 1000 +#define VNIC_FNIC_RATOV_MAX 255000 + +#define VNIC_FNIC_MAXDATAFIELDSIZE_MIN 256 +#define VNIC_FNIC_MAXDATAFIELDSIZE_MAX 2112 + +#define VNIC_FNIC_FLOGI_RETRIES_MIN 0 +#define VNIC_FNIC_FLOGI_RETRIES_MAX 0xffffffff +#define VNIC_FNIC_FLOGI_RETRIES_DEF 0xffffffff + +#define VNIC_FNIC_FLOGI_TIMEOUT_MIN 1000 +#define VNIC_FNIC_FLOGI_TIMEOUT_MAX 255000 + +#define VNIC_FNIC_PLOGI_RETRIES_MIN 0 +#define VNIC_FNIC_PLOGI_RETRIES_MAX 255 +#define VNIC_FNIC_PLOGI_RETRIES_DEF 8 + +#define VNIC_FNIC_PLOGI_TIMEOUT_MIN 1000 +#define VNIC_FNIC_PLOGI_TIMEOUT_MAX 255000 + +#define VNIC_FNIC_IO_THROTTLE_COUNT_MIN 256 +#define VNIC_FNIC_IO_THROTTLE_COUNT_MAX 4096 + +#define VNIC_FNIC_LINK_DOWN_TIMEOUT_MIN 0 +#define VNIC_FNIC_LINK_DOWN_TIMEOUT_MAX 240000 + +#define VNIC_FNIC_PORT_DOWN_TIMEOUT_MIN 0 +#define VNIC_FNIC_PORT_DOWN_TIMEOUT_MAX 240000 + +#define VNIC_FNIC_PORT_DOWN_IO_RETRIES_MIN 0 +#define VNIC_FNIC_PORT_DOWN_IO_RETRIES_MAX 255 + +#define VNIC_FNIC_LUNS_PER_TARGET_MIN 1 +#define VNIC_FNIC_LUNS_PER_TARGET_MAX 1024 + +/* Device-specific region: scsi configuration */ +struct vnic_fc_config { + u64 node_wwn; + u64 port_wwn; + u32 flags; + u32 wq_enet_desc_count; + u32 wq_copy_desc_count; + u32 rq_desc_count; + u32 flogi_retries; + u32 flogi_timeout; + u32 plogi_retries; + u32 plogi_timeout; + u32 io_throttle_count; + u32 link_down_timeout; + u32 port_down_timeout; + u32 port_down_io_retries; + u32 luns_per_tgt; + u16 maxdatafieldsize; + u16 ed_tov; + u16 ra_tov; + u16 intr_timer; + u8 intr_timer_type; +}; + +#define VFCF_FCP_SEQ_LVL_ERR 0x1 /* Enable FCP-2 Error Recovery */ +#define VFCF_PERBI 0x2 /* persistent binding info available */ + +#endif /* _VNIC_SCSI_H_ */ diff --git a/drivers/scsi/fnic/vnic_stats.h b/drivers/scsi/fnic/vnic_stats.h new file mode 100644 index 00000000000..5372e23c1cb --- /dev/null +++ b/drivers/scsi/fnic/vnic_stats.h @@ -0,0 +1,68 @@ +/* + * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. 
+ * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef _VNIC_STATS_H_ +#define _VNIC_STATS_H_ + +/* Tx statistics */ +struct vnic_tx_stats { + u64 tx_frames_ok; + u64 tx_unicast_frames_ok; + u64 tx_multicast_frames_ok; + u64 tx_broadcast_frames_ok; + u64 tx_bytes_ok; + u64 tx_unicast_bytes_ok; + u64 tx_multicast_bytes_ok; + u64 tx_broadcast_bytes_ok; + u64 tx_drops; + u64 tx_errors; + u64 tx_tso; + u64 rsvd[16]; +}; + +/* Rx statistics */ +struct vnic_rx_stats { + u64 rx_frames_ok; + u64 rx_frames_total; + u64 rx_unicast_frames_ok; + u64 rx_multicast_frames_ok; + u64 rx_broadcast_frames_ok; + u64 rx_bytes_ok; + u64 rx_unicast_bytes_ok; + u64 rx_multicast_bytes_ok; + u64 rx_broadcast_bytes_ok; + u64 rx_drop; + u64 rx_no_bufs; + u64 rx_errors; + u64 rx_rss; + u64 rx_crc_errors; + u64 rx_frames_64; + u64 rx_frames_127; + u64 rx_frames_255; + u64 rx_frames_511; + u64 rx_frames_1023; + u64 rx_frames_1518; + u64 rx_frames_to_max; + u64 rsvd[16]; +}; + +struct vnic_stats { + struct vnic_tx_stats tx; + struct vnic_rx_stats rx; +}; + +#endif /* _VNIC_STATS_H_ */ diff --git a/drivers/scsi/fnic/vnic_wq.c b/drivers/scsi/fnic/vnic_wq.c new file mode 100644 index 00000000000..1f9ea790d13 --- /dev/null +++ b/drivers/scsi/fnic/vnic_wq.c @@ -0,0 +1,182 @@ +/* + * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include <linux/errno.h> +#include <linux/types.h> +#include <linux/pci.h> +#include <linux/delay.h> +#include "vnic_dev.h" +#include "vnic_wq.h" + +static int vnic_wq_alloc_bufs(struct vnic_wq *wq) +{ + struct vnic_wq_buf *buf; + struct vnic_dev *vdev; + unsigned int i, j, count = wq->ring.desc_count; + unsigned int blks = VNIC_WQ_BUF_BLKS_NEEDED(count); + + vdev = wq->vdev; + + for (i = 0; i < blks; i++) { + wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ, GFP_ATOMIC); + if (!wq->bufs[i]) { + printk(KERN_ERR "Failed to alloc wq_bufs\n"); + return -ENOMEM; + } + } + + for (i = 0; i < blks; i++) { + buf = wq->bufs[i]; + for (j = 0; j < VNIC_WQ_BUF_BLK_ENTRIES; j++) { + buf->index = i * VNIC_WQ_BUF_BLK_ENTRIES + j; + buf->desc = (u8 *)wq->ring.descs + + wq->ring.desc_size * buf->index; + if (buf->index + 1 == count) { + buf->next = wq->bufs[0]; + break; + } else if (j + 1 == VNIC_WQ_BUF_BLK_ENTRIES) { + buf->next = wq->bufs[i + 1]; + } else { + buf->next = buf + 1; + buf++; + } + } + } + + wq->to_use = wq->to_clean = wq->bufs[0]; + + return 0; +} + +void vnic_wq_free(struct vnic_wq *wq) +{ + struct vnic_dev *vdev; + unsigned int i; + + vdev = wq->vdev; + + vnic_dev_free_desc_ring(vdev, &wq->ring); + + for (i = 0; i < VNIC_WQ_BUF_BLKS_MAX; i++) { + kfree(wq->bufs[i]); + wq->bufs[i] = NULL; + } + + wq->ctrl = NULL; + +} + +int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index, + unsigned int desc_count, unsigned int desc_size) +{ + int err; + + wq->index = index; + wq->vdev = vdev; + + wq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_WQ, index); + if (!wq->ctrl) { + printk(KERN_ERR "Failed to hook WQ[%d] resource\n", index); + return -EINVAL; + } + + vnic_wq_disable(wq); + + err = vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size); + if (err) + return err; + + err = vnic_wq_alloc_bufs(wq); + if (err) { + vnic_wq_free(wq); + return err; + } + + return 0; +} + +void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index, + unsigned int error_interrupt_enable, + unsigned int error_interrupt_offset) +{ + u64 paddr; + + paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET; + writeq(paddr, &wq->ctrl->ring_base); + iowrite32(wq->ring.desc_count, &wq->ctrl->ring_size); + iowrite32(0, &wq->ctrl->fetch_index); + iowrite32(0, &wq->ctrl->posted_index); + iowrite32(cq_index, &wq->ctrl->cq_index); + iowrite32(error_interrupt_enable, &wq->ctrl->error_interrupt_enable); + iowrite32(error_interrupt_offset, &wq->ctrl->error_interrupt_offset); + iowrite32(0, &wq->ctrl->error_status); +} + +unsigned int vnic_wq_error_status(struct vnic_wq *wq) +{ + return ioread32(&wq->ctrl->error_status); +} + +void vnic_wq_enable(struct vnic_wq *wq) +{ + iowrite32(1, &wq->ctrl->enable); +} + +int vnic_wq_disable(struct vnic_wq *wq) +{ + unsigned int wait; + + iowrite32(0, &wq->ctrl->enable); + + /* Wait for HW to ACK disable request */ + for (wait = 0; wait < 100; wait++) { + if (!(ioread32(&wq->ctrl->running))) + return 0; + udelay(1); + } + + printk(KERN_ERR "Failed to disable WQ[%d]\n", wq->index); + + return -ETIMEDOUT; +} + +void vnic_wq_clean(struct vnic_wq *wq, + void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf)) +{ + struct vnic_wq_buf *buf; + + BUG_ON(ioread32(&wq->ctrl->enable)); + + buf = wq->to_clean; + + while (vnic_wq_desc_used(wq) > 0) { + + (*buf_clean)(wq, buf); + + buf = wq->to_clean = buf->next; + wq->ring.desc_avail++; + } + + wq->to_use = wq->to_clean = wq->bufs[0]; + + iowrite32(0, &wq->ctrl->fetch_index); + iowrite32(0, &wq->ctrl->posted_index); + iowrite32(0, &wq->ctrl->error_status); + +
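+ /* software pointers and both hardware indices are back at slot 0 here; clearing the descriptor memory below leaves the ring fully empty for the next init/enable cycle */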
vnic_dev_clear_desc_ring(&wq->ring); +} diff --git a/drivers/scsi/fnic/vnic_wq.h b/drivers/scsi/fnic/vnic_wq.h new file mode 100644 index 00000000000..5cd094f7928 --- /dev/null +++ b/drivers/scsi/fnic/vnic_wq.h @@ -0,0 +1,175 @@ +/* + * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef _VNIC_WQ_H_ +#define _VNIC_WQ_H_ + +#include <linux/pci.h> +#include "vnic_dev.h" +#include "vnic_cq.h" + +/* + * These defines avoid symbol clash between fnic and enic (Cisco 10G Eth + * Driver) when both are built with CONFIG options =y + */ +#define vnic_wq_desc_avail fnic_wq_desc_avail +#define vnic_wq_desc_used fnic_wq_desc_used +#define vnic_wq_next_desc fnic_wq_next_desc +#define vnic_wq_post fnic_wq_post +#define vnic_wq_service fnic_wq_service +#define vnic_wq_free fnic_wq_free +#define vnic_wq_alloc fnic_wq_alloc +#define vnic_wq_init fnic_wq_init +#define vnic_wq_error_status fnic_wq_error_status +#define vnic_wq_enable fnic_wq_enable +#define vnic_wq_disable fnic_wq_disable +#define vnic_wq_clean fnic_wq_clean + +/* Work queue control */ +struct vnic_wq_ctrl { + u64 ring_base; /* 0x00 */ + u32 ring_size; /* 0x08 */ + u32 pad0; + u32 posted_index; /* 0x10 */ + u32 pad1; + u32 cq_index; /* 0x18 */ + u32 pad2; + u32 enable; /* 0x20 */ + u32 pad3; + u32 running; /* 0x28 */ + u32 pad4; + u32 fetch_index; /* 0x30 */ + u32 pad5; + u32 dca_value; /* 0x38 */ + u32 pad6; + u32 error_interrupt_enable; /* 0x40 */ + u32 pad7; + u32 error_interrupt_offset; /* 0x48 */ + u32 pad8; + u32 error_status; /* 0x50 */ + u32 pad9; +}; + +struct vnic_wq_buf { + struct vnic_wq_buf *next; + dma_addr_t dma_addr; + void *os_buf; + unsigned int len; + unsigned int index; + int sop; + void *desc; +}; + +/* Break the vnic_wq_buf allocations into blocks of 64 entries */ +#define VNIC_WQ_BUF_BLK_ENTRIES 64 +#define VNIC_WQ_BUF_BLK_SZ \ + (VNIC_WQ_BUF_BLK_ENTRIES * sizeof(struct vnic_wq_buf)) +#define VNIC_WQ_BUF_BLKS_NEEDED(entries) \ + DIV_ROUND_UP(entries, VNIC_WQ_BUF_BLK_ENTRIES) +#define VNIC_WQ_BUF_BLKS_MAX VNIC_WQ_BUF_BLKS_NEEDED(4096) + +struct vnic_wq { + unsigned int index; + struct vnic_dev *vdev; + struct vnic_wq_ctrl __iomem *ctrl; /* memory-mapped */ + struct vnic_dev_ring ring; + struct vnic_wq_buf *bufs[VNIC_WQ_BUF_BLKS_MAX]; + struct vnic_wq_buf *to_use; + struct vnic_wq_buf *to_clean; + unsigned int pkts_outstanding; +}; + +static inline unsigned int vnic_wq_desc_avail(struct vnic_wq *wq) +{ + /* how many does SW own? */ + return wq->ring.desc_avail; +} + +static inline unsigned int vnic_wq_desc_used(struct vnic_wq *wq) +{ + /* how many does HW own?
*/ + return wq->ring.desc_count - wq->ring.desc_avail - 1; +} + +static inline void *vnic_wq_next_desc(struct vnic_wq *wq) +{ + return wq->to_use->desc; +} + +static inline void vnic_wq_post(struct vnic_wq *wq, + void *os_buf, dma_addr_t dma_addr, + unsigned int len, int sop, int eop) +{ + struct vnic_wq_buf *buf = wq->to_use; + + buf->sop = sop; + buf->os_buf = eop ? os_buf : NULL; + buf->dma_addr = dma_addr; + buf->len = len; + + buf = buf->next; + if (eop) { + /* Adding write memory barrier prevents compiler and/or CPU + * reordering, thus avoiding descriptor posting before + * descriptor is initialized. Otherwise, hardware can read + * stale descriptor fields. + */ + wmb(); + iowrite32(buf->index, &wq->ctrl->posted_index); + } + wq->to_use = buf; + + wq->ring.desc_avail--; +} + +static inline void vnic_wq_service(struct vnic_wq *wq, + struct cq_desc *cq_desc, u16 completed_index, + void (*buf_service)(struct vnic_wq *wq, + struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque), + void *opaque) +{ + struct vnic_wq_buf *buf; + + buf = wq->to_clean; + while (1) { + + (*buf_service)(wq, cq_desc, buf, opaque); + + wq->ring.desc_avail++; + + wq->to_clean = buf->next; + + if (buf->index == completed_index) + break; + + buf = wq->to_clean; + } +} + +void vnic_wq_free(struct vnic_wq *wq); +int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index, + unsigned int desc_count, unsigned int desc_size); +void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index, + unsigned int error_interrupt_enable, + unsigned int error_interrupt_offset); +unsigned int vnic_wq_error_status(struct vnic_wq *wq); +void vnic_wq_enable(struct vnic_wq *wq); +int vnic_wq_disable(struct vnic_wq *wq); +void vnic_wq_clean(struct vnic_wq *wq, + void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf)); + +#endif /* _VNIC_WQ_H_ */ diff --git a/drivers/scsi/fnic/vnic_wq_copy.c b/drivers/scsi/fnic/vnic_wq_copy.c new file mode 100644 index 00000000000..9eab7e7caf3 --- /dev/null +++ b/drivers/scsi/fnic/vnic_wq_copy.c @@ -0,0 +1,117 @@ +/* + * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include <linux/errno.h> +#include <linux/types.h> +#include <linux/pci.h> +#include <linux/delay.h> +#include "vnic_wq_copy.h" + +void vnic_wq_copy_enable(struct vnic_wq_copy *wq) +{ + iowrite32(1, &wq->ctrl->enable); +} + +int vnic_wq_copy_disable(struct vnic_wq_copy *wq) +{ + unsigned int wait; + + iowrite32(0, &wq->ctrl->enable); + + /* Wait for HW to ACK disable request */ + for (wait = 0; wait < 100; wait++) { + if (!(ioread32(&wq->ctrl->running))) + return 0; + udelay(1); + } + + printk(KERN_ERR "Failed to disable Copy WQ[%d]," + " fetch index=%d, posted_index=%d\n", + wq->index, ioread32(&wq->ctrl->fetch_index), + ioread32(&wq->ctrl->posted_index)); + + return -ENODEV; +} + +void vnic_wq_copy_clean(struct vnic_wq_copy *wq, + void (*q_clean)(struct vnic_wq_copy *wq, + struct fcpio_host_req *wq_desc)) +{ + BUG_ON(ioread32(&wq->ctrl->enable)); + + if (vnic_wq_copy_desc_in_use(wq)) + vnic_wq_copy_service(wq, -1, q_clean); + + wq->to_use_index = wq->to_clean_index = 0; + + iowrite32(0, &wq->ctrl->fetch_index); + iowrite32(0, &wq->ctrl->posted_index); + iowrite32(0, &wq->ctrl->error_status); + + vnic_dev_clear_desc_ring(&wq->ring); +} + +void vnic_wq_copy_free(struct vnic_wq_copy *wq) +{ + struct vnic_dev *vdev; + + vdev = wq->vdev; + vnic_dev_free_desc_ring(vdev, &wq->ring); + wq->ctrl = NULL; +} + +int vnic_wq_copy_alloc(struct vnic_dev *vdev, struct vnic_wq_copy *wq, + unsigned int index, unsigned int desc_count, + unsigned int desc_size) +{ + int err; + + wq->index = index; + wq->vdev = vdev; + wq->to_use_index = wq->to_clean_index = 0; + wq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_WQ, index); + if (!wq->ctrl) { + printk(KERN_ERR "Failed to hook COPY WQ[%d] resource\n", index); + return -EINVAL; + } + + vnic_wq_copy_disable(wq); + + err = vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size); + if (err) + return err; + + return 0; +} + +void vnic_wq_copy_init(struct vnic_wq_copy *wq, unsigned int cq_index, + unsigned int error_interrupt_enable, + unsigned int error_interrupt_offset) +{ + u64 paddr; + + paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET; + writeq(paddr, &wq->ctrl->ring_base); + iowrite32(wq->ring.desc_count, &wq->ctrl->ring_size); + iowrite32(0, &wq->ctrl->fetch_index); + iowrite32(0, &wq->ctrl->posted_index); + iowrite32(cq_index, &wq->ctrl->cq_index); + iowrite32(error_interrupt_enable, &wq->ctrl->error_interrupt_enable); + iowrite32(error_interrupt_offset, &wq->ctrl->error_interrupt_offset); +} + diff --git a/drivers/scsi/fnic/vnic_wq_copy.h b/drivers/scsi/fnic/vnic_wq_copy.h new file mode 100644 index 00000000000..6aff9740c3d --- /dev/null +++ b/drivers/scsi/fnic/vnic_wq_copy.h @@ -0,0 +1,128 @@ +/* + * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE.
+ */ +#ifndef _VNIC_WQ_COPY_H_ +#define _VNIC_WQ_COPY_H_ + +#include <linux/pci.h> +#include "vnic_wq.h" +#include "fcpio.h" + +#define VNIC_WQ_COPY_MAX 1 + +struct vnic_wq_copy { + unsigned int index; + struct vnic_dev *vdev; + struct vnic_wq_ctrl __iomem *ctrl; /* memory-mapped */ + struct vnic_dev_ring ring; + unsigned to_use_index; + unsigned to_clean_index; +}; + +static inline unsigned int vnic_wq_copy_desc_avail(struct vnic_wq_copy *wq) +{ + return wq->ring.desc_avail; +} + +static inline unsigned int vnic_wq_copy_desc_in_use(struct vnic_wq_copy *wq) +{ + return wq->ring.desc_count - 1 - wq->ring.desc_avail; +} + +static inline void *vnic_wq_copy_next_desc(struct vnic_wq_copy *wq) +{ + struct fcpio_host_req *desc = wq->ring.descs; + return &desc[wq->to_use_index]; +} + +static inline void vnic_wq_copy_post(struct vnic_wq_copy *wq) +{ + + ((wq->to_use_index + 1) == wq->ring.desc_count) ? + (wq->to_use_index = 0) : (wq->to_use_index++); + wq->ring.desc_avail--; + + /* Adding write memory barrier prevents compiler and/or CPU + * reordering, thus avoiding descriptor posting before + * descriptor is initialized. Otherwise, hardware can read + * stale descriptor fields. + */ + wmb(); + + iowrite32(wq->to_use_index, &wq->ctrl->posted_index); +} + +static inline void vnic_wq_copy_desc_process(struct vnic_wq_copy *wq, u16 index) +{ + unsigned int cnt; + + if (wq->to_clean_index <= index) + cnt = (index - wq->to_clean_index) + 1; + else + cnt = wq->ring.desc_count - wq->to_clean_index + index + 1; + + wq->to_clean_index = ((index + 1) % wq->ring.desc_count); + wq->ring.desc_avail += cnt; + +} + +static inline void vnic_wq_copy_service(struct vnic_wq_copy *wq, + u16 completed_index, + void (*q_service)(struct vnic_wq_copy *wq, + struct fcpio_host_req *wq_desc)) +{ + struct fcpio_host_req *wq_desc = wq->ring.descs; + unsigned int curr_index; + + while (1) { + + if (q_service) + (*q_service)(wq, &wq_desc[wq->to_clean_index]); + + wq->ring.desc_avail++; + + curr_index = wq->to_clean_index; + + /* increment the to-clean index so that we start + * with an unprocessed index next time we enter the loop + */ + ((wq->to_clean_index + 1) == wq->ring.desc_count) ? + (wq->to_clean_index = 0) : (wq->to_clean_index++); + + if (curr_index == completed_index) + break; + + /* we have cleaned all the entries */ + if ((completed_index == (u16)-1) && + (wq->to_clean_index == wq->to_use_index)) + break; + } +} + +void vnic_wq_copy_enable(struct vnic_wq_copy *wq); +int vnic_wq_copy_disable(struct vnic_wq_copy *wq); +void vnic_wq_copy_free(struct vnic_wq_copy *wq); +int vnic_wq_copy_alloc(struct vnic_dev *vdev, struct vnic_wq_copy *wq, + unsigned int index, unsigned int desc_count, unsigned int desc_size); +void vnic_wq_copy_init(struct vnic_wq_copy *wq, unsigned int cq_index, + unsigned int error_interrupt_enable, + unsigned int error_interrupt_offset); +void vnic_wq_copy_clean(struct vnic_wq_copy *wq, + void (*q_clean)(struct vnic_wq_copy *wq, + struct fcpio_host_req *wq_desc)); + +#endif /* _VNIC_WQ_COPY_H_ */ diff --git a/drivers/scsi/fnic/wq_enet_desc.h b/drivers/scsi/fnic/wq_enet_desc.h new file mode 100644 index 00000000000..b121cbad18b --- /dev/null +++ b/drivers/scsi/fnic/wq_enet_desc.h @@ -0,0 +1,96 @@ +/* + * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef _WQ_ENET_DESC_H_ +#define _WQ_ENET_DESC_H_ + +/* Ethernet work queue descriptor: 16B */ +struct wq_enet_desc { + __le64 address; + __le16 length; + __le16 mss_loopback; + __le16 header_length_flags; + __le16 vlan_tag; +}; + +#define WQ_ENET_ADDR_BITS 64 +#define WQ_ENET_LEN_BITS 14 +#define WQ_ENET_LEN_MASK ((1 << WQ_ENET_LEN_BITS) - 1) +#define WQ_ENET_MSS_BITS 14 +#define WQ_ENET_MSS_MASK ((1 << WQ_ENET_MSS_BITS) - 1) +#define WQ_ENET_MSS_SHIFT 2 +#define WQ_ENET_LOOPBACK_SHIFT 1 +#define WQ_ENET_HDRLEN_BITS 10 +#define WQ_ENET_HDRLEN_MASK ((1 << WQ_ENET_HDRLEN_BITS) - 1) +#define WQ_ENET_FLAGS_OM_BITS 2 +#define WQ_ENET_FLAGS_OM_MASK ((1 << WQ_ENET_FLAGS_OM_BITS) - 1) +#define WQ_ENET_FLAGS_EOP_SHIFT 12 +#define WQ_ENET_FLAGS_CQ_ENTRY_SHIFT 13 +#define WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT 14 +#define WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT 15 + +#define WQ_ENET_OFFLOAD_MODE_CSUM 0 +#define WQ_ENET_OFFLOAD_MODE_RESERVED 1 +#define WQ_ENET_OFFLOAD_MODE_CSUM_L4 2 +#define WQ_ENET_OFFLOAD_MODE_TSO 3 + +static inline void wq_enet_desc_enc(struct wq_enet_desc *desc, + u64 address, u16 length, u16 mss, u16 header_length, + u8 offload_mode, u8 eop, u8 cq_entry, u8 fcoe_encap, + u8 vlan_tag_insert, u16 vlan_tag, u8 loopback) +{ + desc->address = cpu_to_le64(address); + desc->length = cpu_to_le16(length & WQ_ENET_LEN_MASK); + desc->mss_loopback = cpu_to_le16((mss & WQ_ENET_MSS_MASK) << + WQ_ENET_MSS_SHIFT | (loopback & 1) << WQ_ENET_LOOPBACK_SHIFT); + desc->header_length_flags = cpu_to_le16( + (header_length & WQ_ENET_HDRLEN_MASK) | + (offload_mode & WQ_ENET_FLAGS_OM_MASK) << WQ_ENET_HDRLEN_BITS | + (eop & 1) << WQ_ENET_FLAGS_EOP_SHIFT | + (cq_entry & 1) << WQ_ENET_FLAGS_CQ_ENTRY_SHIFT | + (fcoe_encap & 1) << WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT | + (vlan_tag_insert & 1) << WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT); + desc->vlan_tag = cpu_to_le16(vlan_tag); +} + +static inline void wq_enet_desc_dec(struct wq_enet_desc *desc, + u64 *address, u16 *length, u16 *mss, u16 *header_length, + u8 *offload_mode, u8 *eop, u8 *cq_entry, u8 *fcoe_encap, + u8 *vlan_tag_insert, u16 *vlan_tag, u8 *loopback) +{ + *address = le64_to_cpu(desc->address); + *length = le16_to_cpu(desc->length) & WQ_ENET_LEN_MASK; + *mss = (le16_to_cpu(desc->mss_loopback) >> WQ_ENET_MSS_SHIFT) & + WQ_ENET_MSS_MASK; + *loopback = (u8)((le16_to_cpu(desc->mss_loopback) >> + WQ_ENET_LOOPBACK_SHIFT) & 1); + *header_length = le16_to_cpu(desc->header_length_flags) & + WQ_ENET_HDRLEN_MASK; + *offload_mode = (u8)((le16_to_cpu(desc->header_length_flags) >> + WQ_ENET_HDRLEN_BITS) & WQ_ENET_FLAGS_OM_MASK); + *eop = (u8)((le16_to_cpu(desc->header_length_flags) >> + WQ_ENET_FLAGS_EOP_SHIFT) & 1); + *cq_entry = (u8)((le16_to_cpu(desc->header_length_flags) >> + WQ_ENET_FLAGS_CQ_ENTRY_SHIFT) & 1); + *fcoe_encap = (u8)((le16_to_cpu(desc->header_length_flags) >> + 
WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT) & 1); + *vlan_tag_insert = (u8)((le16_to_cpu(desc->header_length_flags) >> + WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT) & 1); + *vlan_tag = le16_to_cpu(desc->vlan_tag); +} + +#endif /* _WQ_ENET_DESC_H_ */ diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h index babd4cc0cb2..36b1d1052ba 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.h +++ b/drivers/scsi/mpt2sas/mpt2sas_base.h @@ -69,7 +69,7 @@ #define MPT2SAS_AUTHOR "LSI Corporation " #define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver" #define MPT2SAS_DRIVER_VERSION "01.100.02.00" -#define MPT2SAS_MAJOR_VERSION 00 +#define MPT2SAS_MAJOR_VERSION 01 #define MPT2SAS_MINOR_VERSION 100 #define MPT2SAS_BUILD_VERSION 02 #define MPT2SAS_RELEASE_VERSION 00 diff --git a/drivers/scsi/osd/osd_uld.c b/drivers/scsi/osd/osd_uld.c index f644c9571ea..22b59e13ba8 100644 --- a/drivers/scsi/osd/osd_uld.c +++ b/drivers/scsi/osd/osd_uld.c @@ -173,26 +173,26 @@ static const struct file_operations osd_fops = { .unlocked_ioctl = osd_uld_ioctl, }; -struct osd_dev *osduld_path_lookup(const char *path) +struct osd_dev *osduld_path_lookup(const char *name) { - struct nameidata nd; + struct path path; struct inode *inode; struct cdev *cdev; struct osd_uld_device *uninitialized_var(oud); int error; - if (!path || !*path) { + if (!name || !*name) { OSD_ERR("Mount with !path || !*path\n"); return ERR_PTR(-EINVAL); } - error = path_lookup(path, LOOKUP_FOLLOW, &nd); + error = kern_path(name, LOOKUP_FOLLOW, &path); if (error) { - OSD_ERR("path_lookup of %s faild=>%d\n", path, error); + OSD_ERR("path_lookup of %s failed=>%d\n", name, error); return ERR_PTR(error); } - inode = nd.path.dentry->d_inode; + inode = path.dentry->d_inode; error = -EINVAL; /* Not the right device e.g osd_uld_device */ if (!S_ISCHR(inode->i_mode)) { OSD_DEBUG("!S_ISCHR()\n"); @@ -202,15 +202,15 @@ struct osd_dev *osduld_path_lookup(const char *path) cdev = inode->i_cdev; if (!cdev) { OSD_ERR("Before mounting an OSD Based filesystem\n"); - OSD_ERR(" user-mode must open+close the %s device\n", path); - OSD_ERR(" Example: bash: echo < %s\n", path); + OSD_ERR(" user-mode must open+close the %s device\n", name); + OSD_ERR(" Example: bash: echo < %s\n", name); goto out; } /* The Magic wand. Is it our char-dev */ /* TODO: Support sg devices */ if (cdev->owner != THIS_MODULE) { - OSD_ERR("Error mounting %s - is not an OSD device\n", path); + OSD_ERR("Error mounting %s - is not an OSD device\n", name); goto out; } @@ -220,7 +220,7 @@ struct osd_dev *osduld_path_lookup(const char *path) error = 0; out: - path_put(&nd.path); + path_put(&path); return error ? 
ERR_PTR(error) : &oud->od; } EXPORT_SYMBOL(osduld_path_lookup); diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index 6f51ca485f3..e2b50d8f57a 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c @@ -425,6 +425,7 @@ static struct scsi_target *scsi_alloc_target(struct device *parent, INIT_LIST_HEAD(&starget->devices); starget->state = STARGET_CREATED; starget->scsi_level = SCSI_2; + starget->max_target_blocked = SCSI_DEFAULT_TARGET_BLOCKED; retry: spin_lock_irqsave(shost->host_lock, flags); diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index 09479545529..0a2ce7b6325 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -357,7 +357,7 @@ int iscsi_session_chkready(struct iscsi_cls_session *session) err = 0; break; case ISCSI_SESSION_FAILED: - err = DID_TRANSPORT_DISRUPTED << 16; + err = DID_IMM_RETRY << 16; break; case ISCSI_SESSION_FREE: err = DID_TRANSPORT_FAILFAST << 16; diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c index b4b39811b44..a0127e93ade 100644 --- a/drivers/serial/8250.c +++ b/drivers/serial/8250.c @@ -137,6 +137,7 @@ struct uart_8250_port { unsigned char mcr; unsigned char mcr_mask; /* mask of user bits */ unsigned char mcr_force; /* mask of forced bits */ + unsigned char cur_iotype; /* Running I/O type */ /* * Some bits in registers are cleared on a read, so they must @@ -471,6 +472,7 @@ static void io_serial_out(struct uart_port *p, int offset, int value) static void set_io_from_upio(struct uart_port *p) { + struct uart_8250_port *up = (struct uart_8250_port *)p; switch (p->iotype) { case UPIO_HUB6: p->serial_in = hub6_serial_in; @@ -509,6 +511,8 @@ static void set_io_from_upio(struct uart_port *p) p->serial_out = io_serial_out; break; } + /* Remember loaded iotype */ + up->cur_iotype = p->iotype; } static void @@ -1937,6 +1941,9 @@ static int serial8250_startup(struct uart_port *port) up->capabilities = uart_config[up->port.type].flags; up->mcr = 0; + if (up->port.iotype != up->cur_iotype) + set_io_from_upio(port); + if (up->port.type == PORT_16C950) { /* Wake up and initialize UART */ up->acr = 0; @@ -2563,6 +2570,9 @@ static void serial8250_config_port(struct uart_port *port, int flags) if (ret < 0) probeflags &= ~PROBE_RSA; + if (up->port.iotype != up->cur_iotype) + set_io_from_upio(port); + if (flags & UART_CONFIG_TYPE) autoconfig(up, probeflags); if (up->port.type != PORT_UNKNOWN && flags & UART_CONFIG_IRQ) @@ -2671,6 +2681,11 @@ serial8250_register_ports(struct uart_driver *drv, struct device *dev) { int i; + for (i = 0; i < nr_uarts; i++) { + struct uart_8250_port *up = &serial8250_ports[i]; + up->cur_iotype = 0xFF; + } + serial8250_isa_init_ports(); for (i = 0; i < nr_uarts; i++) { diff --git a/drivers/serial/8250_gsc.c b/drivers/serial/8250_gsc.c index 418b4fe9a0a..33149d982e8 100644 --- a/drivers/serial/8250_gsc.c +++ b/drivers/serial/8250_gsc.c @@ -39,9 +39,9 @@ static int __init serial_init_chip(struct parisc_device *dev) */ if (parisc_parent(dev)->id.hw_type != HPHW_IOA) printk(KERN_INFO - "Serial: device 0x%lx not configured.\n" + "Serial: device 0x%llx not configured.\n" "Enable support for Wax, Lasi, Asp or Dino.\n", - dev->hpa.start); + (unsigned long long)dev->hpa.start); return -ENODEV; } diff --git a/drivers/serial/amba-pl010.c b/drivers/serial/amba-pl010.c index e3a5ad5ef1d..cdc049d4350 100644 --- a/drivers/serial/amba-pl010.c +++ b/drivers/serial/amba-pl010.c @@ -665,7 +665,7 @@ static struct uart_driver amba_reg = { .cons = 
AMBA_CONSOLE, }; -static int pl010_probe(struct amba_device *dev, void *id) +static int pl010_probe(struct amba_device *dev, struct amba_id *id) { struct uart_amba_port *uap; void __iomem *base; diff --git a/drivers/serial/amba-pl011.c b/drivers/serial/amba-pl011.c index 8b2b9700f3e..88fdac51b6c 100644 --- a/drivers/serial/amba-pl011.c +++ b/drivers/serial/amba-pl011.c @@ -729,7 +729,7 @@ static struct uart_driver amba_reg = { .cons = AMBA_CONSOLE, }; -static int pl011_probe(struct amba_device *dev, void *id) +static int pl011_probe(struct amba_device *dev, struct amba_id *id) { struct uart_amba_port *uap; void __iomem *base; diff --git a/drivers/serial/icom.c b/drivers/serial/icom.c index 6579e2be1dd..a461b3b2c72 100644 --- a/drivers/serial/icom.c +++ b/drivers/serial/icom.c @@ -1472,8 +1472,8 @@ static void icom_remove_adapter(struct icom_adapter *icom_adapter) free_irq(icom_adapter->pci_dev->irq, (void *) icom_adapter); iounmap(icom_adapter->base_addr); - icom_free_adapter(icom_adapter); pci_release_regions(icom_adapter->pci_dev); + icom_free_adapter(icom_adapter); } static void icom_kref_release(struct kref *kref) diff --git a/drivers/serial/imx.c b/drivers/serial/imx.c index 9f460b175c5..5f0be40dfda 100644 --- a/drivers/serial/imx.c +++ b/drivers/serial/imx.c @@ -1031,6 +1031,8 @@ imx_console_setup(struct console *co, char *options) if (co->index == -1 || co->index >= ARRAY_SIZE(imx_ports)) co->index = 0; sport = imx_ports[co->index]; + if(sport == NULL) + return -ENODEV; if (options) uart_parse_options(options, &baud, &parity, &bits, &flow); diff --git a/drivers/serial/mpc52xx_uart.c b/drivers/serial/mpc52xx_uart.c index 7f72f8ceaa6..b3feb6198d5 100644 --- a/drivers/serial/mpc52xx_uart.c +++ b/drivers/serial/mpc52xx_uart.c @@ -988,7 +988,7 @@ mpc52xx_console_setup(struct console *co, char *options) pr_debug("mpc52xx_console_setup co=%p, co->index=%i, options=%s\n", co, co->index, options); - if ((co->index < 0) || (co->index > MPC52xx_PSC_MAXNUM)) { + if ((co->index < 0) || (co->index >= MPC52xx_PSC_MAXNUM)) { pr_debug("PSC%x out of range\n", co->index); return -EINVAL; } diff --git a/drivers/serial/nwpserial.c b/drivers/serial/nwpserial.c index 32f3eaf0d26..9e150b19d72 100644 --- a/drivers/serial/nwpserial.c +++ b/drivers/serial/nwpserial.c @@ -145,11 +145,13 @@ static irqreturn_t nwpserial_interrupt(int irq, void *dev_id) ch = dcr_read(up->dcr_host, UART_RX); if (up->port.ignore_status_mask != NWPSERIAL_STATUS_RXVALID) tty_insert_flip_char(tty, ch, TTY_NORMAL); - } while (dcr_read(up->dcr_host, UART_RX) & UART_LSR_DR); + } while (dcr_read(up->dcr_host, UART_LSR) & UART_LSR_DR); tty_flip_buffer_push(tty); ret = IRQ_HANDLED; + /* clear interrupt */ + dcr_write(up->dcr_host, UART_IIR, 1); out: spin_unlock(&up->port.lock); return ret; diff --git a/drivers/spi/pxa2xx_spi.c b/drivers/spi/pxa2xx_spi.c index 885194a0741..3f3c08c6ba4 100644 --- a/drivers/spi/pxa2xx_spi.c +++ b/drivers/spi/pxa2xx_spi.c @@ -1373,6 +1373,9 @@ static void cleanup(struct spi_device *spi) { struct chip_data *chip = spi_get_ctldata(spi); + if (!chip) + return; + if (gpio_is_valid(chip->gpio_cs)) gpio_free(chip->gpio_cs); diff --git a/drivers/ssb/embedded.c b/drivers/ssb/embedded.c index 7dc3a6b4139..a0e0d246b59 100644 --- a/drivers/ssb/embedded.c +++ b/drivers/ssb/embedded.c @@ -29,6 +29,7 @@ int ssb_watchdog_timer_set(struct ssb_bus *bus, u32 ticks) } return -ENODEV; } +EXPORT_SYMBOL(ssb_watchdog_timer_set); u32 ssb_gpio_in(struct ssb_bus *bus, u32 mask) { diff --git a/drivers/staging/comedi/TODO 
b/drivers/staging/comedi/TODO index 55781295846..15c9348fb93 100644 --- a/drivers/staging/comedi/TODO +++ b/drivers/staging/comedi/TODO @@ -11,4 +11,3 @@ Please send patches to Greg Kroah-Hartman and copy: Ian Abbott Frank Mori Hess - David Schleef diff --git a/drivers/staging/rt2870/rt2870.h b/drivers/staging/rt2870/rt2870.h index a42caa37080..a69cf338e49 100644 --- a/drivers/staging/rt2870/rt2870.h +++ b/drivers/staging/rt2870/rt2870.h @@ -145,6 +145,7 @@ {USB_DEVICE(0x0789,0x0162)}, /* Logitec */ \ {USB_DEVICE(0x0789,0x0163)}, /* Logitec */ \ {USB_DEVICE(0x0789,0x0164)}, /* Logitec */ \ + {USB_DEVICE(0x7392,0x7717)}, /* Edimax */ \ { }/* Terminating entry */ \ } diff --git a/drivers/staging/rtl8187se/r8180.h b/drivers/staging/rtl8187se/r8180.h index 12215fc61dd..db446b7e2e0 100644 --- a/drivers/staging/rtl8187se/r8180.h +++ b/drivers/staging/rtl8187se/r8180.h @@ -19,7 +19,7 @@ #define R8180H -#define RTL8180_MODULE_NAME "rtl8180" +#define RTL8180_MODULE_NAME "r8180" #define DMESG(x,a...) printk(KERN_INFO RTL8180_MODULE_NAME ": " x "\n", ## a) #define DMESGW(x,a...) printk(KERN_WARNING RTL8180_MODULE_NAME ": WW:" x "\n", ## a) #define DMESGE(x,a...) printk(KERN_WARNING RTL8180_MODULE_NAME ": EE:" x "\n", ## a) diff --git a/drivers/staging/rtl8187se/r8180_core.c b/drivers/staging/rtl8187se/r8180_core.c index 6ecd12de429..e10413cee0d 100644 --- a/drivers/staging/rtl8187se/r8180_core.c +++ b/drivers/staging/rtl8187se/r8180_core.c @@ -640,11 +640,9 @@ void rtl8180_proc_init_one(struct net_device *dev) { struct proc_dir_entry *e; struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev); - priv->dir_dev = create_proc_entry(dev->name, - S_IFDIR | S_IRUGO | S_IXUGO, - rtl8180_proc); + priv->dir_dev = rtl8180_proc; if (!priv->dir_dev) { - DMESGE("Unable to initialize /proc/net/rtl8180/%s\n", + DMESGE("Unable to initialize /proc/net/r8180/%s\n", dev->name); return; } @@ -654,7 +652,7 @@ void rtl8180_proc_init_one(struct net_device *dev) if (!e) { DMESGE("Unable to initialize " - "/proc/net/rtl8180/%s/stats-hw\n", + "/proc/net/r8180/%s/stats-hw\n", dev->name); } @@ -663,7 +661,7 @@ void rtl8180_proc_init_one(struct net_device *dev) if (!e) { DMESGE("Unable to initialize " - "/proc/net/rtl8180/%s/stats-rx\n", + "/proc/net/r8180/%s/stats-rx\n", dev->name); } @@ -673,7 +671,7 @@ void rtl8180_proc_init_one(struct net_device *dev) if (!e) { DMESGE("Unable to initialize " - "/proc/net/rtl8180/%s/stats-tx\n", + "/proc/net/r8180/%s/stats-tx\n", dev->name); } #if 0 @@ -702,7 +700,7 @@ void rtl8180_proc_init_one(struct net_device *dev) if (!e) { DMESGE("Unable to initialize " - "/proc/net/rtl8180/%s/registers\n", + "/proc/net/r8180/%s/registers\n", dev->name); } } @@ -977,13 +975,6 @@ void check_tx_ring(struct net_device *dev, int pri) *tmp & (1<<15)? "ok": "err", *(tmp+4)); } - DMESG("nic at %d", - (nic-nicbegin) / 8 /4); - DMESG("tail at %d", ((int)tail - (int)begin) /8 /4); - DMESG("head at %d", ((int)head - (int)begin) /8 /4); - DMESG("check free desc returns %d", check_nic_enought_desc(dev,pri)); - DMESG("free desc is %d\n", get_curr_tx_free_desc(dev,pri)); - //rtl8180_reset(dev); return; } @@ -1736,17 +1727,7 @@ short alloc_tx_desc_ring(struct net_device *dev, int bufsize, int count, * descriptor's buffer must be 256 byte aligned * we shouldn't be here, since we set DMA mask ! 
*/ - DMESGW("Fixing TX alignment"); - desc = (u32*)((u8*)desc + 256); -#if (defined(CONFIG_HIGHMEM64G) || defined(CONFIG_64BIT_PHYS_ADDR)) - desc = (u32*)((u64)desc &~ 0xff); - dma_desc = (dma_addr_t)((u8*)dma_desc + 256); - dma_desc = (dma_addr_t)((u64)dma_desc &~ 0xff); -#else - desc = (u32*)((u32)desc &~ 0xff); - dma_desc = (dma_addr_t)((u8*)dma_desc + 256); - dma_desc = (dma_addr_t)((u32)dma_desc &~ 0xff); -#endif + WARN(1, "DMA buffer is not aligned\n"); } tmp=desc; for (i=0;irxring=desc; diff --git a/drivers/staging/winbond/wbusb.c b/drivers/staging/winbond/wbusb.c index 9c3f9439f35..3b2d52819b4 100644 --- a/drivers/staging/winbond/wbusb.c +++ b/drivers/staging/winbond/wbusb.c @@ -386,7 +386,7 @@ static int wb35_probe(struct usb_interface *intf, const struct usb_device_id *id if (err) goto error_free_hw; - usb_set_intfdata(intf, priv); + usb_set_intfdata(intf, dev); return 0; @@ -415,10 +415,15 @@ static void wb35_hw_halt(struct wbsoft_priv *adapter) static void wb35_disconnect(struct usb_interface *intf) { - struct wbsoft_priv *priv = usb_get_intfdata(intf); + struct ieee80211_hw *hw = usb_get_intfdata(intf); + struct wbsoft_priv *priv = hw->priv; wb35_hw_halt(priv); + ieee80211_stop_queues(hw); + ieee80211_unregister_hw(hw); + ieee80211_free_hw(hw); + usb_set_intfdata(intf, NULL); usb_put_dev(interface_to_usbdev(intf)); } diff --git a/drivers/thermal/thermal_sys.c b/drivers/thermal/thermal_sys.c index d0b093b66ad..5e38ba10a3a 100644 --- a/drivers/thermal/thermal_sys.c +++ b/drivers/thermal/thermal_sys.c @@ -961,7 +961,7 @@ void thermal_zone_device_update(struct thermal_zone_device *tz) switch (trip_type) { case THERMAL_TRIP_CRITICAL: - if (temp > trip_temp) { + if (temp >= trip_temp) { if (tz->ops->notify) ret = tz->ops->notify(tz, count, trip_type); @@ -974,7 +974,7 @@ void thermal_zone_device_update(struct thermal_zone_device *tz) } break; case THERMAL_TRIP_HOT: - if (temp > trip_temp) + if (temp >= trip_temp) if (tz->ops->notify) tz->ops->notify(tz, count, trip_type); break; @@ -986,14 +986,14 @@ void thermal_zone_device_update(struct thermal_zone_device *tz) cdev = instance->cdev; - if (temp > trip_temp) + if (temp >= trip_temp) cdev->ops->set_cur_state(cdev, 1); else cdev->ops->set_cur_state(cdev, 0); } break; case THERMAL_TRIP_PASSIVE: - if (temp > trip_temp || tz->passive) + if (temp >= trip_temp || tz->passive) thermal_zone_device_passive(tz, temp, trip_temp, count); break; diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile index 89299a5ce16..0a3dc5ece63 100644 --- a/drivers/usb/Makefile +++ b/drivers/usb/Makefile @@ -11,7 +11,6 @@ obj-$(CONFIG_USB_MON) += mon/ obj-$(CONFIG_PCI) += host/ obj-$(CONFIG_USB_EHCI_HCD) += host/ obj-$(CONFIG_USB_ISP116X_HCD) += host/ -obj-$(CONFIG_USB_ISP1760_HCD) += host/ obj-$(CONFIG_USB_OHCI_HCD) += host/ obj-$(CONFIG_USB_UHCI_HCD) += host/ obj-$(CONFIG_USB_FHCI_HCD) += host/ @@ -27,6 +26,8 @@ obj-$(CONFIG_USB_WUSB) += wusbcore/ obj-$(CONFIG_USB_ACM) += class/ obj-$(CONFIG_USB_PRINTER) += class/ +obj-$(CONFIG_USB_WDM) += class/ +obj-$(CONFIG_USB_TMC) += class/ obj-$(CONFIG_USB_STORAGE) += storage/ obj-$(CONFIG_USB) += storage/ diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c index 6789089e246..56802d2e994 100644 --- a/drivers/usb/atm/cxacru.c +++ b/drivers/usb/atm/cxacru.c @@ -227,8 +227,14 @@ static ssize_t cxacru_sysfs_showattr_s8(s8 value, char *buf) static ssize_t cxacru_sysfs_showattr_dB(s16 value, char *buf) { - return snprintf(buf, PAGE_SIZE, "%d.%02u\n", - value / 100, abs(value) % 100); + if (likely(value >= 0)) 
{ + return snprintf(buf, PAGE_SIZE, "%u.%02u\n", + value / 100, value % 100); + } else { + value = -value; + return snprintf(buf, PAGE_SIZE, "-%u.%02u\n", + value / 100, value % 100); + } } static ssize_t cxacru_sysfs_showattr_bool(u32 value, char *buf) diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 0a69c0977e3..7a1164dd1d3 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c @@ -1375,6 +1375,9 @@ static struct usb_device_id acm_ids[] = { { USB_DEVICE(0x0572, 0x1324), /* Conexant USB MODEM RD02-D400 */ .driver_info = NO_UNION_NORMAL, /* has no union descriptor */ }, + { USB_DEVICE(0x0572, 0x1328), /* Shiro / Aztech USB MODEM UM-3100 */ + .driver_info = NO_UNION_NORMAL, /* has no union descriptor */ + }, { USB_DEVICE(0x22b8, 0x6425), /* Motorola MOTOMAGX phones */ }, { USB_DEVICE(0x0572, 0x1329), /* Hummingbird huc56s (Conexant) */ diff --git a/drivers/usb/gadget/atmel_usba_udc.c b/drivers/usb/gadget/atmel_usba_udc.c index 563d5727544..05c913cc365 100644 --- a/drivers/usb/gadget/atmel_usba_udc.c +++ b/drivers/usb/gadget/atmel_usba_udc.c @@ -794,7 +794,8 @@ usba_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags) if (ep->desc) { list_add_tail(&req->queue, &ep->queue); - if (ep->is_in || (ep_is_control(ep) + if ((!ep_is_control(ep) && ep->is_in) || + (ep_is_control(ep) && (ep->state == DATA_STAGE_IN || ep->state == STATUS_STAGE_IN))) usba_ep_writel(ep, CTL_ENB, USBA_TX_PK_RDY); @@ -1940,7 +1941,7 @@ static int __init usba_udc_probe(struct platform_device *pdev) usba_writel(udc, CTRL, USBA_DISABLE_MASK); clk_disable(pclk); - usba_ep = kmalloc(sizeof(struct usba_ep) * pdata->num_ep, + usba_ep = kzalloc(sizeof(struct usba_ep) * pdata->num_ep, GFP_KERNEL); if (!usba_ep) goto err_alloc_ep; diff --git a/drivers/usb/gadget/usbstring.c b/drivers/usb/gadget/usbstring.c index 4154be375c7..58c4d37d312 100644 --- a/drivers/usb/gadget/usbstring.c +++ b/drivers/usb/gadget/usbstring.c @@ -38,7 +38,7 @@ static int utf8_to_utf16le(const char *s, __le16 *cp, unsigned len) uchar = (c & 0x1f) << 6; c = (u8) *s++; - if ((c & 0xc0) != 0xc0) + if ((c & 0xc0) != 0x80) goto fail; c &= 0x3f; uchar |= c; @@ -49,13 +49,13 @@ static int utf8_to_utf16le(const char *s, __le16 *cp, unsigned len) uchar = (c & 0x0f) << 12; c = (u8) *s++; - if ((c & 0xc0) != 0xc0) + if ((c & 0xc0) != 0x80) goto fail; c &= 0x3f; uchar |= c << 6; c = (u8) *s++; - if ((c & 0xc0) != 0xc0) + if ((c & 0xc0) != 0x80) goto fail; c &= 0x3f; uchar |= c; diff --git a/drivers/usb/host/isp1760-hcd.c b/drivers/usb/host/isp1760-hcd.c index cd07ea3f0c6..15438469f21 100644 --- a/drivers/usb/host/isp1760-hcd.c +++ b/drivers/usb/host/isp1760-hcd.c @@ -1658,6 +1658,7 @@ static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, u32 reg_base, or_reg, skip_reg; unsigned long flags; struct ptd ptd; + packet_enqueue *pe; switch (usb_pipetype(urb->pipe)) { case PIPE_ISOCHRONOUS: @@ -1669,6 +1670,7 @@ static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, reg_base = INT_REGS_OFFSET; or_reg = HC_INT_IRQ_MASK_OR_REG; skip_reg = HC_INT_PTD_SKIPMAP_REG; + pe = enqueue_an_INT_packet; break; default: @@ -1676,6 +1678,7 @@ static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, reg_base = ATL_REGS_OFFSET; or_reg = HC_ATL_IRQ_MASK_OR_REG; skip_reg = HC_ATL_PTD_SKIPMAP_REG; + pe = enqueue_an_ATL_packet; break; } @@ -1687,6 +1690,7 @@ static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, u32 skip_map; u32 or_map; struct isp1760_qtd *qtd; + struct isp1760_qh 
*qh = ints->qh; skip_map = isp1760_readl(hcd->regs + skip_reg); skip_map |= 1 << i; @@ -1699,8 +1703,7 @@ static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, priv_write_copy(priv, (u32 *)&ptd, hcd->regs + reg_base + i * sizeof(ptd), sizeof(ptd)); qtd = ints->qtd; - - clean_up_qtdlist(qtd); + qtd = clean_up_qtdlist(qtd); free_mem(priv, ints->payload); @@ -1711,7 +1714,24 @@ static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, ints->payload = 0; isp1760_urb_done(priv, urb, status); + if (qtd) + pe(hcd, qh, qtd); break; + + } else if (ints->qtd) { + struct isp1760_qtd *qtd, *prev_qtd = ints->qtd; + + for (qtd = ints->qtd->hw_next; qtd; qtd = qtd->hw_next) { + if (qtd->urb == urb) { + prev_qtd->hw_next = clean_up_qtdlist(qtd); + isp1760_urb_done(priv, urb, status); + break; + } + prev_qtd = qtd; + } + /* we found the urb before the end of the list */ + if (qtd) + break; } ints++; } diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 8100f1d2590..d9fcdaedf38 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -56,6 +56,7 @@ static __u16 vendor = FTDI_VID; static __u16 product; struct ftdi_private { + struct kref kref; ftdi_chip_type_t chip_type; /* type of device, either SIO or FT8U232AM */ int baud_base; /* baud base clock for divisor setting */ @@ -669,6 +670,8 @@ static struct usb_device_id id_table_combined [] = { { USB_DEVICE(ADI_VID, ADI_GNICE_PID), .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, { USB_DEVICE(JETI_VID, JETI_SPC1201_PID) }, + { USB_DEVICE(MARVELL_VID, MARVELL_SHEEVAPLUG_PID), + .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, { }, /* Optional parameter entry */ { } /* Terminating entry */ }; @@ -1352,6 +1355,7 @@ static int ftdi_sio_port_probe(struct usb_serial_port *port) return -ENOMEM; } + kref_init(&priv->kref); spin_lock_init(&priv->rx_lock); spin_lock_init(&priv->tx_lock); init_waitqueue_head(&priv->delta_msr_wait); @@ -1468,6 +1472,13 @@ static void ftdi_shutdown(struct usb_serial *serial) dbg("%s", __func__); } +static void ftdi_sio_priv_release(struct kref *k) +{ + struct ftdi_private *priv = container_of(k, struct ftdi_private, kref); + + kfree(priv); +} + static int ftdi_sio_port_remove(struct usb_serial_port *port) { struct ftdi_private *priv = usb_get_serial_port_data(port); @@ -1476,14 +1487,7 @@ static int ftdi_sio_port_remove(struct usb_serial_port *port) remove_sysfs_attrs(port); - /* all open ports are closed at this point - * (by usbserial.c:__serial_close, which calls ftdi_close) - */ - - if (priv) { - usb_set_serial_port_data(port, NULL); - kfree(priv); - } + kref_put(&priv->kref, ftdi_sio_priv_release); return 0; } @@ -1547,7 +1551,8 @@ static int ftdi_open(struct tty_struct *tty, dev_err(&port->dev, "%s - failed submitting read urb, error %d\n", __func__, result); - + else + kref_get(&priv->kref); return result; } /* ftdi_open */ @@ -1589,11 +1594,11 @@ static void ftdi_close(struct tty_struct *tty, mutex_unlock(&port->serial->disc_mutex); /* cancel any scheduled reading */ - cancel_delayed_work(&priv->rx_work); - flush_scheduled_work(); + cancel_delayed_work_sync(&priv->rx_work); /* shutdown our bulk read */ usb_kill_urb(port->read_urb); + kref_put(&priv->kref, ftdi_sio_priv_release); } /* ftdi_close */ diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h index c09f658a448..12330fa1c09 100644 --- a/drivers/usb/serial/ftdi_sio.h +++ b/drivers/usb/serial/ftdi_sio.h @@ -919,6 +919,12 @@ #define JETI_VID 0x0c6c #define JETI_SPC1201_PID 
0x04b2 +/* + * Marvell SheevaPlug + */ +#define MARVELL_VID 0x9e88 +#define MARVELL_SHEEVAPLUG_PID 0x9e8f + /* * BmRequestType: 1100 0000b * bRequest: FTDI_E2_READ diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c index 0a566eea49c..f331e2bde88 100644 --- a/drivers/usb/serial/usb-serial.c +++ b/drivers/usb/serial/usb-serial.c @@ -974,6 +974,7 @@ int usb_serial_probe(struct usb_interface *interface, if (retval > 0) { /* quietly accept this device, but don't bind to a serial port as it's about to disappear */ + serial->num_ports = 0; goto exit; } } diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index fa65a3b0860..4b8b69045fe 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h @@ -160,8 +160,9 @@ UNUSUAL_DEV( 0x0420, 0x0001, 0x0100, 0x0100, US_SC_DEVICE, US_PR_DEVICE, NULL, US_FL_IGNORE_RESIDUE ), -/* Reported by Andrew Nayenko */ -UNUSUAL_DEV( 0x0421, 0x0019, 0x0592, 0x0592, +/* Reported by Andrew Nayenko + * Updated for new firmware by Phillip Potter */ +UNUSUAL_DEV( 0x0421, 0x0019, 0x0592, 0x0610, "Nokia", "Nokia 6288", US_SC_DEVICE, US_PR_DEVICE, NULL, diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig index 7826bdce4bb..0048f1185a6 100644 --- a/drivers/video/Kconfig +++ b/drivers/video/Kconfig @@ -1128,13 +1128,6 @@ config FB_INTEL 830M/845G/852GM/855GM/865G/915G/915GM/945G/945GM/965G/965GM chipsets. Say Y if you have and plan to use such a board. - If you say Y here and want DDC/I2C support you must first say Y to - "I2C support" and "I2C bit-banging support" in the character devices - section. - - If you say M here then "I2C support" and "I2C bit-banging support" - can be build either as modules or built-in. - To compile this driver as a module, choose M here: the module will be called intelfb. @@ -1207,11 +1200,10 @@ config FB_MATROX_G pixel and 32 bpp packed pixel. You can also use font widths different from 8. - If you need support for G400 secondary head, you must first say Y to - "I2C support" in the character devices section, and then to - "Matrox I2C support" and "G400 second head support" here in the - framebuffer section. G450/G550 secondary head and digital output - are supported without additional modules. + If you need support for G400 secondary head, you must say Y to + "Matrox I2C support" and "G400 second head support" right below. + G450/G550 secondary head and digital output are supported without + additional modules. The driver starts in monitor mode. You must use the matroxset tool (available at ) to @@ -1310,13 +1302,6 @@ config FB_RADEON a framebuffer device. There are both PCI and AGP versions. You don't need to choose this to run the Radeon in plain VGA mode. - If you say Y here and want DDC/I2C support you must first say Y to - "I2C support" and "I2C bit-banging support" in the character devices - section. - - If you say M here then "I2C support" and "I2C bit-banging support" - can be build either as modules or built-in. 
- There is a product page at http://apps.ati.com/ATIcompare/ diff --git a/drivers/video/amba-clcd.c b/drivers/video/amba-clcd.c index 61050ab1412..d1f80bac54f 100644 --- a/drivers/video/amba-clcd.c +++ b/drivers/video/amba-clcd.c @@ -437,7 +437,7 @@ static int clcdfb_register(struct clcd_fb *fb) return ret; } -static int clcdfb_probe(struct amba_device *dev, void *id) +static int clcdfb_probe(struct amba_device *dev, struct amba_id *id) { struct clcd_board *board = dev->dev.platform_data; struct clcd_fb *fb; diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c index 9a577a800db..2fb63f6ea2f 100644 --- a/drivers/video/atmel_lcdfb.c +++ b/drivers/video/atmel_lcdfb.c @@ -29,14 +29,8 @@ /* configurable parameters */ #define ATMEL_LCDC_CVAL_DEFAULT 0xc8 -#define ATMEL_LCDC_DMA_BURST_LEN 8 - -#if defined(CONFIG_ARCH_AT91SAM9263) || defined(CONFIG_ARCH_AT91CAP9) || \ - defined(CONFIG_ARCH_AT91SAM9RL) -#define ATMEL_LCDC_FIFO_SIZE 2048 -#else -#define ATMEL_LCDC_FIFO_SIZE 512 -#endif +#define ATMEL_LCDC_DMA_BURST_LEN 8 /* words */ +#define ATMEL_LCDC_FIFO_SIZE 512 /* words */ #if defined(CONFIG_ARCH_AT91) #define ATMEL_LCDFB_FBINFO_DEFAULT (FBINFO_DEFAULT \ diff --git a/drivers/video/gbefb.c b/drivers/video/gbefb.c index fe5b519860b..1a83709f961 100644 --- a/drivers/video/gbefb.c +++ b/drivers/video/gbefb.c @@ -75,7 +75,7 @@ struct gbefb_par { static unsigned int gbe_mem_size = CONFIG_FB_GBE_MEM * 1024*1024; static void *gbe_mem; static dma_addr_t gbe_dma_addr; -unsigned long gbe_mem_phys; +static unsigned long gbe_mem_phys; static struct { uint16_t *cpu; @@ -185,8 +185,8 @@ static struct fb_videomode default_mode_LCD __initdata = { .vmode = FB_VMODE_NONINTERLACED, }; -struct fb_videomode *default_mode __initdata = &default_mode_CRT; -struct fb_var_screeninfo *default_var __initdata = &default_var_CRT; +static struct fb_videomode *default_mode __initdata = &default_mode_CRT; +static struct fb_var_screeninfo *default_var __initdata = &default_var_CRT; static int flat_panel_enabled = 0; @@ -205,7 +205,7 @@ static void gbe_reset(void) * console. 
*/ -void gbe_turn_off(void) +static void gbe_turn_off(void) { int i; unsigned int val, x, y, vpixen_off; @@ -1097,7 +1097,7 @@ static void gbefb_create_sysfs(struct device *dev) * Initialization */ -int __init gbefb_setup(char *options) +static int __init gbefb_setup(char *options) { char *this_opt; @@ -1283,7 +1283,7 @@ static struct platform_driver gbefb_driver = { static struct platform_device *gbefb_device; -int __init gbefb_init(void) +static int __init gbefb_init(void) { int ret = platform_driver_register(&gbefb_driver); if (!ret) { @@ -1301,7 +1301,7 @@ int __init gbefb_init(void) return ret; } -void __exit gbefb_exit(void) +static void __exit gbefb_exit(void) { platform_device_unregister(gbefb_device); platform_driver_unregister(&gbefb_driver); diff --git a/drivers/video/omap/dispc.c b/drivers/video/omap/dispc.c index dfb72f5e4c9..148cbcc3960 100644 --- a/drivers/video/omap/dispc.c +++ b/drivers/video/omap/dispc.c @@ -880,20 +880,22 @@ static irqreturn_t omap_dispc_irq_handler(int irq, void *dev) static int get_dss_clocks(void) { - if (IS_ERR((dispc.dss_ick = clk_get(dispc.fbdev->dev, "dss_ick")))) { - dev_err(dispc.fbdev->dev, "can't get dss_ick\n"); + dispc.dss_ick = clk_get(dispc.fbdev->dev, "ick"); + if (IS_ERR(dispc.dss_ick)) { + dev_err(dispc.fbdev->dev, "can't get ick\n"); return PTR_ERR(dispc.dss_ick); } - if (IS_ERR((dispc.dss1_fck = clk_get(dispc.fbdev->dev, "dss1_fck")))) { + dispc.dss1_fck = clk_get(dispc.fbdev->dev, "dss1_fck"); + if (IS_ERR(dispc.dss1_fck)) { dev_err(dispc.fbdev->dev, "can't get dss1_fck\n"); clk_put(dispc.dss_ick); return PTR_ERR(dispc.dss1_fck); } - if (IS_ERR((dispc.dss_54m_fck = - clk_get(dispc.fbdev->dev, "dss_54m_fck")))) { - dev_err(dispc.fbdev->dev, "can't get dss_54m_fck\n"); + dispc.dss_54m_fck = clk_get(dispc.fbdev->dev, "tv_fck"); + if (IS_ERR(dispc.dss_54m_fck)) { + dev_err(dispc.fbdev->dev, "can't get tv_fck\n"); clk_put(dispc.dss_ick); clk_put(dispc.dss1_fck); return PTR_ERR(dispc.dss_54m_fck); diff --git a/drivers/video/omap/rfbi.c b/drivers/video/omap/rfbi.c index a13c8dcad2a..9332d6ca645 100644 --- a/drivers/video/omap/rfbi.c +++ b/drivers/video/omap/rfbi.c @@ -83,12 +83,14 @@ static inline u32 rfbi_read_reg(int idx) static int rfbi_get_clocks(void) { - if (IS_ERR((rfbi.dss_ick = clk_get(rfbi.fbdev->dev, "dss_ick")))) { - dev_err(rfbi.fbdev->dev, "can't get dss_ick\n"); + rfbi.dss_ick = clk_get(rfbi.fbdev->dev, "ick"); + if (IS_ERR(rfbi.dss_ick)) { + dev_err(rfbi.fbdev->dev, "can't get ick\n"); return PTR_ERR(rfbi.dss_ick); } - if (IS_ERR((rfbi.dss1_fck = clk_get(rfbi.fbdev->dev, "dss1_fck")))) { + rfbi.dss1_fck = clk_get(rfbi.fbdev->dev, "dss1_fck"); + if (IS_ERR(rfbi.dss1_fck)) { dev_err(rfbi.fbdev->dev, "can't get dss1_fck\n"); clk_put(rfbi.dss_ick); return PTR_ERR(rfbi.dss1_fck); diff --git a/drivers/video/s3c-fb.c b/drivers/video/s3c-fb.c index 5e9c6302433..d3a568e6b16 100644 --- a/drivers/video/s3c-fb.c +++ b/drivers/video/s3c-fb.c @@ -947,7 +947,8 @@ static int __devexit s3c_fb_remove(struct platform_device *pdev) int win; for (win = 0; win <= S3C_FB_MAX_WIN; win++) - s3c_fb_release_win(sfb, sfb->windows[win]); + if (sfb->windows[win]) + s3c_fb_release_win(sfb, sfb->windows[win]); iounmap(sfb->regs); @@ -985,11 +986,20 @@ static int s3c_fb_suspend(struct platform_device *pdev, pm_message_t state) static int s3c_fb_resume(struct platform_device *pdev) { struct s3c_fb *sfb = platform_get_drvdata(pdev); + struct s3c_fb_platdata *pd = sfb->pdata; struct s3c_fb_win *win; int win_no; clk_enable(sfb->bus_clk); + /* setup registers */ 
+ writel(pd->vidcon1, sfb->regs + VIDCON1); + + /* zero all windows before we do anything */ + for (win_no = 0; win_no < S3C_FB_MAX_WIN; win_no++) + s3c_fb_clear_win(sfb, win_no); + + /* restore framebuffers */ for (win_no = 0; win_no < S3C_FB_MAX_WIN; win_no++) { win = sfb->windows[win_no]; if (!win) diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c index 92ea0ab44ce..f10d2fbeda0 100644 --- a/drivers/video/sh_mobile_lcdcfb.c +++ b/drivers/video/sh_mobile_lcdcfb.c @@ -47,6 +47,7 @@ struct sh_mobile_lcdc_priv { #endif unsigned long lddckr; struct sh_mobile_lcdc_chan ch[2]; + int started; }; /* shared registers */ @@ -451,6 +452,7 @@ static int sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv) /* start the lcdc */ sh_mobile_lcdc_start_stop(priv, 1); + priv->started = 1; /* tell the board code to enable the panel */ for (k = 0; k < ARRAY_SIZE(priv->ch); k++) { @@ -493,7 +495,10 @@ static void sh_mobile_lcdc_stop(struct sh_mobile_lcdc_priv *priv) } /* stop the lcdc */ - sh_mobile_lcdc_start_stop(priv, 0); + if (priv->started) { + sh_mobile_lcdc_start_stop(priv, 0); + priv->started = 0; + } /* stop clocks */ for (k = 0; k < ARRAY_SIZE(priv->ch); k++) diff --git a/drivers/video/via/viafbdev.c b/drivers/video/via/viafbdev.c index e327b84820d..a0fec298216 100644 --- a/drivers/video/via/viafbdev.c +++ b/drivers/video/via/viafbdev.c @@ -2103,7 +2103,7 @@ static void viafb_remove_proc(struct proc_dir_entry *viafb_entry) static int __devinit via_pci_probe(void) { - unsigned int default_xres, default_yres; + unsigned long default_xres, default_yres; char *tmpc, *tmpm; char *tmpc_sec, *tmpm_sec; int vmode_index; @@ -2196,8 +2196,8 @@ static int __devinit via_pci_probe(void) viafb_FB_MM = viaparinfo->fbmem_virt; tmpm = viafb_mode; tmpc = strsep(&tmpm, "x"); - strict_strtoul(tmpc, 0, (unsigned long *)&default_xres); - strict_strtoul(tmpm, 0, (unsigned long *)&default_yres); + strict_strtoul(tmpc, 0, &default_xres); + strict_strtoul(tmpm, 0, &default_yres); vmode_index = viafb_get_mode_index(default_xres, default_yres, 0); DEBUG_MSG(KERN_INFO "0->index=%d\n", vmode_index); diff --git a/firmware/cis/.gitignore b/firmware/cis/.gitignore new file mode 100644 index 00000000000..1de39847f26 --- /dev/null +++ b/firmware/cis/.gitignore @@ -0,0 +1 @@ +*.cis diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c index 5f8ab8adb5f..ab5547ff29a 100644 --- a/fs/9p/vfs_super.c +++ b/fs/9p/vfs_super.c @@ -37,6 +37,7 @@ #include #include #include +#include #include #include @@ -155,6 +156,7 @@ static int v9fs_get_sb(struct file_system_type *fs_type, int flags, root = d_alloc_root(inode); if (!root) { + iput(inode); retval = -ENOMEM; goto release_sb; } @@ -173,10 +175,7 @@ P9_DPRINTK(P9_DEBUG_VFS, " simple set mount, return 0\n"); return 0; release_sb: - if (sb) { - up_write(&sb->s_umount); - deactivate_super(sb); - } + deactivate_locked_super(sb); free_stat: kfree(st); @@ -230,9 +229,12 @@ static int v9fs_show_options(struct seq_file *m, struct vfsmount *mnt) static void v9fs_umount_begin(struct super_block *sb) { - struct v9fs_session_info *v9ses = sb->s_fs_info; + struct v9fs_session_info *v9ses; + lock_kernel(); + v9ses = sb->s_fs_info; v9fs_session_cancel(v9ses); + unlock_kernel(); } static const struct super_operations v9fs_super_ops = { diff --git a/fs/affs/super.c b/fs/affs/super.c index 5ce695e707f..63f5183f263 100644 --- a/fs/affs/super.c +++ b/fs/affs/super.c @@ -507,8 +507,7 @@ affs_remount(struct super_block *sb, int *flags, char *data) kfree(new_opts); return -EINVAL; } - 
kfree(sb->s_options);
- sb->s_options = new_opts;
+ replace_mount_options(sb, new_opts);
 sbi->s_flags = mount_flags;
 sbi->s_mode = mode;
diff --git a/fs/afs/super.c b/fs/afs/super.c
index aee239a048c..76828e5f8a3 100644
--- a/fs/afs/super.c
+++ b/fs/afs/super.c
@@ -405,21 +405,20 @@ static int afs_get_sb(struct file_system_type *fs_type,
 sb->s_flags = flags;
 ret = afs_fill_super(sb, &params);
 if (ret < 0) {
- up_write(&sb->s_umount);
- deactivate_super(sb);
+ deactivate_locked_super(sb);
 goto error;
 }
- sb->s_options = new_opts;
+ save_mount_options(sb, new_opts);
 sb->s_flags |= MS_ACTIVE;
 } else {
 _debug("reuse");
- kfree(new_opts);
 ASSERTCMP(sb->s_flags, &, MS_ACTIVE);
 }
 simple_set_mnt(mnt, sb);
 afs_put_volume(params.volume);
 afs_put_cell(params.cell);
+ kfree(new_opts);
 _leave(" = 0 [%p]", sb);
 return 0;
diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
index eeb24684590..2341375386f 100644
--- a/fs/autofs4/waitq.c
+++ b/fs/autofs4/waitq.c
@@ -297,20 +297,14 @@ static int validate_request(struct autofs_wait_queue **wait,
 */
 if (notify == NFY_MOUNT) {
 /*
- * If the dentry isn't hashed just go ahead and try the
- * mount again with a new wait (not much else we can do).
- */
- if (!d_unhashed(dentry)) {
- /*
- * But if the dentry is hashed, that means that we
- * got here through the revalidate path. Thus, we
- * need to check if the dentry has been mounted
- * while we waited on the wq_mutex. If it has,
- * simply return success.
- */
- if (d_mountpoint(dentry))
- return 0;
- }
+ * If the dentry was successfully mounted while we slept
+ * on the wait queue mutex we can return success. If it
+ * isn't mounted (doesn't have submounts for the case of
+ * a multi-mount with no mount at its base) we can
+ * continue on and create a new request.
+ */
+ if (have_submounts(dentry))
+ return 0;
 }
 return 1;
diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
index 5cebf0b3779..697f6b5f131 100644
--- a/fs/binfmt_flat.c
+++ b/fs/binfmt_flat.c
@@ -41,6 +41,7 @@
 #include
 #include
 #include
+#include
 /****************************************************************************/
@@ -54,6 +55,18 @@
 #define DBG_FLT(a...)
 #endif
+/*
+ * User data (stack, data section and bss) needs to be aligned
+ * for the same reasons as SLAB memory is, and to the same amount.
+ * Avoid duplicating architecture specific code by using the same
+ * macro as with SLAB allocation:
+ */
+#ifdef ARCH_SLAB_MINALIGN
+#define FLAT_DATA_ALIGN (ARCH_SLAB_MINALIGN)
+#else
+#define FLAT_DATA_ALIGN (sizeof(void *))
+#endif
+
 #define RELOC_FAILED 0xff00ff01 /* Relocation incorrect somewhere */
 #define UNLOADED_LIB 0x7ff000ff /* Placeholder for unused library */
@@ -114,20 +127,18 @@ static unsigned long create_flat_tables(
 int envc = bprm->envc;
 char uninitialized_var(dummy);
- sp = (unsigned long *) ((-(unsigned long)sizeof(char *))&(unsigned long) p);
+ sp = (unsigned long *)p;
+ sp -= (envc + argc + 2) + 1 + (flat_argvp_envp_on_stack() ? 2 : 0);
+ sp = (unsigned long *) ((unsigned long)sp & -FLAT_DATA_ALIGN);
+ argv = sp + 1 + (flat_argvp_envp_on_stack() ?
2 : 0); + envp = argv + (argc + 1); - sp -= envc+1; - envp = sp; - sp -= argc+1; - argv = sp; - - flat_stack_align(sp); if (flat_argvp_envp_on_stack()) { - --sp; put_user((unsigned long) envp, sp); - --sp; put_user((unsigned long) argv, sp); + put_user((unsigned long) envp, sp + 2); + put_user((unsigned long) argv, sp + 1); } - put_user(argc,--sp); + put_user(argc, sp); current->mm->arg_start = (unsigned long) p; while (argc-->0) { put_user((unsigned long) p, argv++); @@ -558,7 +569,9 @@ static int load_flat_file(struct linux_binprm * bprm, ret = realdatastart; goto err; } - datapos = realdatastart + MAX_SHARED_LIBS * sizeof(unsigned long); + datapos = ALIGN(realdatastart + + MAX_SHARED_LIBS * sizeof(unsigned long), + FLAT_DATA_ALIGN); DBG_FLT("BINFMT_FLAT: Allocated data+bss+stack (%d bytes): %x\n", (int)(data_len + bss_len + stack_len), (int)datapos); @@ -604,9 +617,12 @@ static int load_flat_file(struct linux_binprm * bprm, } realdatastart = textpos + ntohl(hdr->data_start); - datapos = realdatastart + MAX_SHARED_LIBS * sizeof(unsigned long); - reloc = (unsigned long *) (textpos + ntohl(hdr->reloc_start) + - MAX_SHARED_LIBS * sizeof(unsigned long)); + datapos = ALIGN(realdatastart + + MAX_SHARED_LIBS * sizeof(unsigned long), + FLAT_DATA_ALIGN); + + reloc = (unsigned long *) + (datapos + (ntohl(hdr->reloc_start) - text_len)); memp = textpos; memp_size = len; #ifdef CONFIG_BINFMT_ZFLAT @@ -854,7 +870,7 @@ static int load_flat_binary(struct linux_binprm * bprm, struct pt_regs * regs) stack_len = TOP_OF_ARGS - bprm->p; /* the strings */ stack_len += (bprm->argc + 1) * sizeof(char *); /* the argv array */ stack_len += (bprm->envc + 1) * sizeof(char *); /* the envp array */ - + stack_len += FLAT_DATA_ALIGN - 1; /* reserve for upcoming alignment */ res = load_flat_file(bprm, &libinfo, 0, &stack_len); if (res > (unsigned long)-4096) diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index a99f1c2a710..fedf8b9f03a 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -1469,6 +1469,7 @@ read_block_for_search(struct btrfs_trans_handle *trans, u32 blocksize; struct extent_buffer *b = *eb_ret; struct extent_buffer *tmp; + int ret; blocknr = btrfs_node_blockptr(b, slot); gen = btrfs_node_ptr_generation(b, slot); @@ -1476,6 +1477,10 @@ read_block_for_search(struct btrfs_trans_handle *trans, tmp = btrfs_find_tree_block(root, blocknr, blocksize); if (tmp && btrfs_buffer_uptodate(tmp, gen)) { + /* + * we found an up to date block without sleeping, return + * right away + */ *eb_ret = tmp; return 0; } @@ -1483,7 +1488,9 @@ read_block_for_search(struct btrfs_trans_handle *trans, /* * reduce lock contention at high levels * of the btree by dropping locks before - * we read. + * we read. Don't release the lock on the current + * level because we need to walk this node to figure + * out which blocks to read. */ btrfs_unlock_up_safe(p, level + 1); btrfs_set_path_blocking(p); @@ -1494,10 +1501,21 @@ read_block_for_search(struct btrfs_trans_handle *trans, reada_for_search(root, p, level, slot, key->objectid); btrfs_release_path(NULL, p); + + ret = -EAGAIN; tmp = read_tree_block(root, blocknr, blocksize, gen); - if (tmp) + if (tmp) { + /* + * If the read above didn't mark this buffer up to date, + * it will never end up being up to date. Set ret to EIO now + * and give up so that our caller doesn't loop forever + * on our EAGAINs. 
+ */ + if (!btrfs_buffer_uptodate(tmp, 0)) + ret = -EIO; free_extent_buffer(tmp); - return -EAGAIN; + } + return ret; } /* @@ -1696,6 +1714,9 @@ cow_done: if (ret == -EAGAIN) goto again; + if (ret == -EIO) + goto done; + if (!p->skip_locking) { int lret; @@ -1738,6 +1759,8 @@ done: */ if (!p->leave_spinning) btrfs_set_path_blocking(p); + if (ret < 0) + btrfs_release_path(root, p); return ret; } @@ -4212,6 +4235,11 @@ again: if (ret == -EAGAIN) goto again; + if (ret < 0) { + btrfs_release_path(root, path); + goto done; + } + if (!path->skip_locking) { ret = btrfs_try_spin_lock(next); if (!ret) { @@ -4246,6 +4274,11 @@ again: if (ret == -EAGAIN) goto again; + if (ret < 0) { + btrfs_release_path(root, path); + goto done; + } + if (!path->skip_locking) { btrfs_assert_tree_locked(path->nodes[level]); ret = btrfs_try_spin_lock(next); diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 0ff16d3331d..4b0ea0b80c2 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -848,8 +848,6 @@ struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr, if (ret == 0) set_bit(EXTENT_BUFFER_UPTODATE, &buf->bflags); - else - WARN_ON(1); return buf; } diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index e4966444811..35af9335506 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -312,7 +312,7 @@ btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr) } /* - * return the block group that contains teh given bytenr + * return the block group that contains the given bytenr */ struct btrfs_block_group_cache *btrfs_lookup_block_group( struct btrfs_fs_info *info, @@ -2622,7 +2622,18 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans, search_start); if (block_group && block_group_bits(block_group, data)) { down_read(&space_info->groups_sem); - goto have_block_group; + if (list_empty(&block_group->list) || + block_group->ro) { + /* + * someone is removing this block group, + * we can't jump into the have_block_group + * target because our list pointers are not + * valid + */ + btrfs_put_block_group(block_group); + up_read(&space_info->groups_sem); + } else + goto have_block_group; } else if (block_group) { btrfs_put_block_group(block_group); } @@ -2656,6 +2667,13 @@ have_block_group: * people trying to start a new cluster */ spin_lock(&last_ptr->refill_lock); + if (last_ptr->block_group && + (last_ptr->block_group->ro || + !block_group_bits(last_ptr->block_group, data))) { + offset = 0; + goto refill_cluster; + } + offset = btrfs_alloc_from_cluster(block_group, last_ptr, num_bytes, search_start); if (offset) { @@ -2681,10 +2699,17 @@ have_block_group: last_ptr_loop = 1; search_start = block_group->key.objectid; + /* + * we know this block group is properly + * in the list because + * btrfs_remove_block_group, drops the + * cluster before it removes the block + * group from the list + */ goto have_block_group; } spin_unlock(&last_ptr->lock); - +refill_cluster: /* * this cluster didn't work out, free it and * start over @@ -5968,6 +5993,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, { struct btrfs_path *path; struct btrfs_block_group_cache *block_group; + struct btrfs_free_cluster *cluster; struct btrfs_key key; int ret; @@ -5979,6 +6005,21 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, memcpy(&key, &block_group->key, sizeof(key)); + /* make sure this block group isn't part of an allocation cluster */ + cluster = &root->fs_info->data_alloc_cluster; + spin_lock(&cluster->refill_lock); + 
btrfs_return_cluster_to_free_space(block_group, cluster); + spin_unlock(&cluster->refill_lock); + + /* + * make sure this block group isn't part of a metadata + * allocation cluster + */ + cluster = &root->fs_info->meta_alloc_cluster; + spin_lock(&cluster->refill_lock); + btrfs_return_cluster_to_free_space(block_group, cluster); + spin_unlock(&cluster->refill_lock); + path = btrfs_alloc_path(); BUG_ON(!path); @@ -5988,7 +6029,11 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, spin_unlock(&root->fs_info->block_group_cache_lock); btrfs_remove_free_space_cache(block_group); down_write(&block_group->space_info->groups_sem); - list_del(&block_group->list); + /* + * we must use list_del_init so people can check to see if they + * are still on the list after taking the semaphore + */ + list_del_init(&block_group->list); up_write(&block_group->space_info->groups_sem); spin_lock(&block_group->space_info->lock); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 90c23eb2882..1c8b0190d03 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -3122,6 +3122,7 @@ static noinline void init_btrfs_i(struct inode *inode) bi->flags = 0; bi->index_cnt = (u64)-1; bi->last_unlink_trans = 0; + bi->ordered_data_close = 0; extent_map_tree_init(&BTRFS_I(inode)->extent_tree, GFP_NOFS); extent_io_tree_init(&BTRFS_I(inode)->io_tree, inode->i_mapping, GFP_NOFS); @@ -4295,7 +4296,6 @@ out: } if (err) { free_extent_map(em); - WARN_ON(1); return ERR_PTR(err); } return em; diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 5e94ea6e1cb..2624b53ea78 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -437,10 +437,6 @@ out_unlock: return 0; } -/* - * Called inside transaction, so use GFP_NOFS - */ - static int btrfs_ioctl_resize(struct btrfs_root *root, void __user *arg) { u64 new_size; diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 3536bdb2d7c..2ff7cd2db25 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -436,9 +436,9 @@ static int btrfs_show_options(struct seq_file *seq, struct vfsmount *vfs) if (btrfs_test_opt(root, SSD)) seq_puts(seq, ",ssd"); if (btrfs_test_opt(root, NOTREELOG)) - seq_puts(seq, ",no-treelog"); + seq_puts(seq, ",notreelog"); if (btrfs_test_opt(root, FLUSHONCOMMIT)) - seq_puts(seq, ",flush-on-commit"); + seq_puts(seq, ",flushoncommit"); if (!(root->fs_info->sb->s_flags & MS_POSIXACL)) seq_puts(seq, ",noacl"); return 0; @@ -502,8 +502,7 @@ static int btrfs_get_sb(struct file_system_type *fs_type, int flags, if (s->s_root) { if ((flags ^ s->s_flags) & MS_RDONLY) { - up_write(&s->s_umount); - deactivate_super(s); + deactivate_locked_super(s); error = -EBUSY; goto error_close_devices; } @@ -517,8 +516,7 @@ static int btrfs_get_sb(struct file_system_type *fs_type, int flags, error = btrfs_fill_super(s, fs_devices, data, flags & MS_SILENT ? 
1 : 0); if (error) { - up_write(&s->s_umount); - deactivate_super(s); + deactivate_locked_super(s); goto error_free_subvol_name; } @@ -535,15 +533,13 @@ static int btrfs_get_sb(struct file_system_type *fs_type, int flags, mutex_unlock(&s->s_root->d_inode->i_mutex); if (IS_ERR(root)) { - up_write(&s->s_umount); - deactivate_super(s); + deactivate_locked_super(s); error = PTR_ERR(root); goto error_free_subvol_name; } if (!root->d_inode) { dput(root); - up_write(&s->s_umount); - deactivate_super(s); + deactivate_locked_super(s); error = -ENXIO; goto error_free_subvol_name; } diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 5f01dad4b69..a6d35b0054c 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -1440,6 +1440,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path) device->io_align = root->sectorsize; device->sector_size = root->sectorsize; device->total_bytes = i_size_read(bdev->bd_inode); + device->disk_total_bytes = device->total_bytes; device->dev_root = root->fs_info->dev_root; device->bdev = bdev; device->in_fs_metadata = 1; diff --git a/fs/buffer.c b/fs/buffer.c index aed297739eb..49106127a4a 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -2736,6 +2736,8 @@ has_buffers: pos += blocksize; } + map_bh.b_size = blocksize; + map_bh.b_state = 0; err = get_block(inode, iblock, &map_bh, 0); if (err) goto unlock; diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h index 19218e1463d..f7c255f9c62 100644 --- a/fs/cachefiles/internal.h +++ b/fs/cachefiles/internal.h @@ -122,13 +122,13 @@ static inline void cachefiles_state_changed(struct cachefiles_cache *cache) } /* - * cf-bind.c + * bind.c */ extern int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args); extern void cachefiles_daemon_unbind(struct cachefiles_cache *cache); /* - * cf-daemon.c + * daemon.c */ extern const struct file_operations cachefiles_daemon_fops; @@ -136,17 +136,17 @@ extern int cachefiles_has_space(struct cachefiles_cache *cache, unsigned fnr, unsigned bnr); /* - * cf-interface.c + * interface.c */ extern const struct fscache_cache_ops cachefiles_cache_ops; /* - * cf-key.c + * key.c */ extern char *cachefiles_cook_key(const u8 *raw, int keylen, uint8_t type); /* - * cf-namei.c + * namei.c */ extern int cachefiles_delete_object(struct cachefiles_cache *cache, struct cachefiles_object *object); @@ -165,7 +165,7 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache, struct dentry *dir, char *filename); /* - * cf-proc.c + * proc.c */ #ifdef CONFIG_CACHEFILES_HISTOGRAM extern atomic_t cachefiles_lookup_histogram[HZ]; @@ -190,7 +190,7 @@ void cachefiles_hist(atomic_t histogram[], unsigned long start_jif) #endif /* - * cf-rdwr.c + * rdwr.c */ extern int cachefiles_read_or_alloc_page(struct fscache_retrieval *, struct page *, gfp_t); @@ -205,7 +205,7 @@ extern int cachefiles_write_page(struct fscache_storage *, struct page *); extern void cachefiles_uncache_page(struct fscache_object *, struct page *); /* - * cf-security.c + * security.c */ extern int cachefiles_get_security_ID(struct cachefiles_cache *cache); extern int cachefiles_determine_cache_security(struct cachefiles_cache *cache, @@ -225,7 +225,7 @@ static inline void cachefiles_end_secure(struct cachefiles_cache *cache, } /* - * cf-xattr.c + * xattr.c */ extern int cachefiles_check_object_type(struct cachefiles_object *object); extern int cachefiles_set_object_xattr(struct cachefiles_object *object, diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 0d6d8b57365..5e6d35804d7 100644 --- 
a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -35,6 +35,7 @@ #include #include #include +#include #include "cifsfs.h" #include "cifspdu.h" #define DECLARE_GLOBALS_HERE @@ -530,6 +531,7 @@ static void cifs_umount_begin(struct super_block *sb) if (tcon == NULL) return; + lock_kernel(); read_lock(&cifs_tcp_ses_lock); if (tcon->tc_count == 1) tcon->tidStatus = CifsExiting; @@ -548,6 +550,7 @@ static void cifs_umount_begin(struct super_block *sb) } /* BB FIXME - finish add checks for tidStatus BB */ + unlock_kernel(); return; } @@ -599,8 +602,7 @@ cifs_get_sb(struct file_system_type *fs_type, rc = cifs_read_super(sb, data, dev_name, flags & MS_SILENT ? 1 : 0); if (rc) { - up_write(&sb->s_umount); - deactivate_super(sb); + deactivate_locked_super(sb); return rc; } sb->s_flags |= MS_ACTIVE; diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index 75e6623a863..d06260251c3 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c @@ -2475,7 +2475,7 @@ querySymLinkRetry: /* BB FIXME investigate remapping reserved chars here */ *symlinkinfo = cifs_strndup_from_ucs(data_start, count, is_unicode, nls_codepage); - if (!symlinkinfo) + if (!*symlinkinfo) rc = -ENOMEM; } } @@ -3976,9 +3976,8 @@ parse_DFS_referrals(TRANSACTION2_GET_DFS_REFER_RSP *pSMBr, max_len = data_end - temp; node->path_name = cifs_strndup_from_ucs(temp, max_len, is_unicode, nls_codepage); - if (IS_ERR(node->path_name)) { - rc = PTR_ERR(node->path_name); - node->path_name = NULL; + if (!node->path_name) { + rc = -ENOMEM; goto parse_DFS_referrals_exit; } @@ -3987,11 +3986,8 @@ parse_DFS_referrals(TRANSACTION2_GET_DFS_REFER_RSP *pSMBr, max_len = data_end - temp; node->node_name = cifs_strndup_from_ucs(temp, max_len, is_unicode, nls_codepage); - if (IS_ERR(node->node_name)) { - rc = PTR_ERR(node->node_name); - node->node_name = NULL; - goto parse_DFS_referrals_exit; - } + if (!node->node_name) + rc = -ENOMEM; } parse_DFS_referrals_exit: diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c index 11431ed72a7..3758965d73d 100644 --- a/fs/cifs/dir.c +++ b/fs/cifs/dir.c @@ -225,6 +225,7 @@ int cifs_posix_open(char *full_path, struct inode **pinode, if (!(oflags & FMODE_READ)) write_only = true; + mode &= ~current_umask(); rc = CIFSPOSIXCreate(xid, cifs_sb->tcon, posix_flags, mode, pnetfid, presp_data, &oplock, full_path, cifs_sb->local_nls, cifs_sb->mnt_cifs_flags & @@ -310,7 +311,6 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode, return -ENOMEM; } - mode &= ~current_umask(); if (oplockEnabled) oplock = REQ_OPLOCK; @@ -336,7 +336,7 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode, else /* success, no need to query */ goto cifs_create_set_dentry; } else if ((rc != -EIO) && (rc != -EREMOTE) && - (rc != -EOPNOTSUPP)) /* path not found or net err */ + (rc != -EOPNOTSUPP) && (rc != -EINVAL)) goto cifs_create_out; /* else fallthrough to retry, using older open call, this is case where server does not support this SMB level, and @@ -609,7 +609,6 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry, int xid; int rc = 0; /* to get around spurious gcc warning, set to zero here */ int oplock = 0; - int mode; __u16 fileHandle = 0; bool posix_open = false; struct cifs_sb_info *cifs_sb; @@ -658,30 +657,36 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry, } cFYI(1, ("Full path: %s inode = 0x%p", full_path, direntry->d_inode)); + /* Posix open is only called (at lookup time) for file create now. 
+ * For opens (rather than creates), because we do not know if it + * is a file or directory yet, and current Samba no longer allows + * us to do posix open on dirs, we could end up wasting an open call + * on what turns out to be a dir. For file opens, we wait to call posix + * open till cifs_open. It could be added here (lookup) in the future + * but the performance tradeoff of the extra network request when EISDIR + * or EACCES is returned would have to be weighed against the 50% + * reduction in network traffic in the other paths. + */ if (pTcon->unix_ext) { if (!(nd->flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY)) && - (nd->flags & LOOKUP_OPEN)) { - if (!((nd->intent.open.flags & O_CREAT) && - (nd->intent.open.flags & O_EXCL))) { - mode = nd->intent.open.create_mode & - ~current_umask(); - rc = cifs_posix_open(full_path, &newInode, - parent_dir_inode->i_sb, mode, + (nd->flags & LOOKUP_OPEN) && !pTcon->broken_posix_open && + (nd->intent.open.flags & O_CREAT)) { + rc = cifs_posix_open(full_path, &newInode, + parent_dir_inode->i_sb, + nd->intent.open.create_mode, nd->intent.open.flags, &oplock, &fileHandle, xid); - /* - * This code works around a bug in - * samba posix open in samba versions 3.3.1 - * and earlier where create works - * but open fails with invalid parameter. - * If either of these error codes are - * returned, follow the normal lookup. - * Otherwise, the error during posix open - * is handled. - */ - if ((rc != -EINVAL) && (rc != -EOPNOTSUPP)) - posix_open = true; - } + /* + * The check below works around a bug in POSIX + * open in samba versions 3.3.1 and earlier where + * open could incorrectly fail with invalid parameter. + * If either that or op not supported returned, follow + * the normal lookup. + */ + if ((rc == 0) || (rc == -ENOENT)) + posix_open = true; + else if ((rc == -EINVAL) || (rc != -EOPNOTSUPP)) + pTcon->broken_posix_open = true; } if (!posix_open) rc = cifs_get_inode_info_unix(&newInode, full_path, diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 38c06f82657..302ea15f02e 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -130,10 +130,6 @@ static inline int cifs_posix_open_inode_helper(struct inode *inode, struct cifsFileInfo *pCifsFile, int oplock, u16 netfid) { - file->private_data = kmalloc(sizeof(struct cifsFileInfo), GFP_KERNEL); - if (file->private_data == NULL) - return -ENOMEM; - pCifsFile = cifs_init_private(file->private_data, inode, file, netfid); write_lock(&GlobalSMBSeslock); pCifsInode = CIFS_I(file->f_path.dentry->d_inode); @@ -184,6 +180,38 @@ psx_client_can_cache: return 0; } +static struct cifsFileInfo * +cifs_fill_filedata(struct file *file) +{ + struct list_head *tmp; + struct cifsFileInfo *pCifsFile = NULL; + struct cifsInodeInfo *pCifsInode = NULL; + + /* search inode for this file and fill in file->private_data */ + pCifsInode = CIFS_I(file->f_path.dentry->d_inode); + read_lock(&GlobalSMBSeslock); + list_for_each(tmp, &pCifsInode->openFileList) { + pCifsFile = list_entry(tmp, struct cifsFileInfo, flist); + if ((pCifsFile->pfile == NULL) && + (pCifsFile->pid == current->tgid)) { + /* mode set in cifs_create */ + + /* needed for writepage */ + pCifsFile->pfile = file; + file->private_data = pCifsFile; + break; + } + } + read_unlock(&GlobalSMBSeslock); + + if (file->private_data != NULL) { + return pCifsFile; + } else if ((file->f_flags & O_CREAT) && (file->f_flags & O_EXCL)) + cERROR(1, ("could not find file instance for " + "new file %p", file)); + return NULL; +} + /* all arguments to this function must be checked for validity in 
caller */ static inline int cifs_open_inode_helper(struct inode *inode, struct file *file, struct cifsInodeInfo *pCifsInode, struct cifsFileInfo *pCifsFile, @@ -258,7 +286,6 @@ int cifs_open(struct inode *inode, struct file *file) struct cifsTconInfo *tcon; struct cifsFileInfo *pCifsFile; struct cifsInodeInfo *pCifsInode; - struct list_head *tmp; char *full_path = NULL; int desiredAccess; int disposition; @@ -270,32 +297,12 @@ int cifs_open(struct inode *inode, struct file *file) cifs_sb = CIFS_SB(inode->i_sb); tcon = cifs_sb->tcon; - /* search inode for this file and fill in file->private_data */ pCifsInode = CIFS_I(file->f_path.dentry->d_inode); - read_lock(&GlobalSMBSeslock); - list_for_each(tmp, &pCifsInode->openFileList) { - pCifsFile = list_entry(tmp, struct cifsFileInfo, - flist); - if ((pCifsFile->pfile == NULL) && - (pCifsFile->pid == current->tgid)) { - /* mode set in cifs_create */ - - /* needed for writepage */ - pCifsFile->pfile = file; - - file->private_data = pCifsFile; - break; - } - } - read_unlock(&GlobalSMBSeslock); - - if (file->private_data != NULL) { - rc = 0; + pCifsFile = cifs_fill_filedata(file); + if (pCifsFile) { FreeXid(xid); - return rc; - } else if ((file->f_flags & O_CREAT) && (file->f_flags & O_EXCL)) - cERROR(1, ("could not find file instance for " - "new file %p", file)); + return 0; + } full_path = build_path_from_dentry(file->f_path.dentry); if (full_path == NULL) { @@ -325,6 +332,7 @@ int cifs_open(struct inode *inode, struct file *file) /* no need for special case handling of setting mode on read only files needed here */ + pCifsFile = cifs_fill_filedata(file); cifs_posix_open_inode_helper(inode, file, pCifsInode, pCifsFile, oplock, netfid); goto out; diff --git a/fs/cifs/link.c b/fs/cifs/link.c index ea9d11e3dcb..cd83c53fcbb 100644 --- a/fs/cifs/link.c +++ b/fs/cifs/link.c @@ -107,48 +107,48 @@ void * cifs_follow_link(struct dentry *direntry, struct nameidata *nd) { struct inode *inode = direntry->d_inode; - int rc = -EACCES; + int rc = -ENOMEM; int xid; char *full_path = NULL; - char *target_path = ERR_PTR(-ENOMEM); - struct cifs_sb_info *cifs_sb; - struct cifsTconInfo *pTcon; + char *target_path = NULL; + struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); + struct cifsTconInfo *tcon = cifs_sb->tcon; xid = GetXid(); - full_path = build_path_from_dentry(direntry); + /* + * For now, we just handle symlinks with unix extensions enabled. + * Eventually we should handle NTFS reparse points, and MacOS + * symlink support. For instance... + * + * rc = CIFSSMBQueryReparseLinkInfo(...) + * + * For now, just return -EACCES when the server doesn't support posix + * extensions. Note that we still allow querying symlinks when posix + * extensions are manually disabled. We could disable these as well + * but there doesn't seem to be any harm in allowing the client to + * read them. 
+ */ + if (!(tcon->ses->capabilities & CAP_UNIX)) { + rc = -EACCES; + goto out; + } + full_path = build_path_from_dentry(direntry); if (!full_path) goto out; cFYI(1, ("Full path: %s inode = 0x%p", full_path, inode)); - cifs_sb = CIFS_SB(inode->i_sb); - pTcon = cifs_sb->tcon; - - /* We could change this to: - if (pTcon->unix_ext) - but there does not seem any point in refusing to - get symlink info if we can, even if unix extensions - turned off for this mount */ - - if (pTcon->ses->capabilities & CAP_UNIX) - rc = CIFSSMBUnixQuerySymLink(xid, pTcon, full_path, - &target_path, - cifs_sb->local_nls); - else { - /* BB add read reparse point symlink code here */ - /* rc = CIFSSMBQueryReparseLinkInfo */ - /* BB Add code to Query ReparsePoint info */ - /* BB Add MAC style xsymlink check here if enabled */ - } + rc = CIFSSMBUnixQuerySymLink(xid, tcon, full_path, &target_path, + cifs_sb->local_nls); + kfree(full_path); +out: if (rc != 0) { kfree(target_path); target_path = ERR_PTR(rc); } - kfree(full_path); -out: FreeXid(xid); nd_set_link(nd, target_path); return NULL; diff --git a/fs/dcache.c b/fs/dcache.c index 1fcffebfb44..75659a6fd1f 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -481,7 +481,7 @@ restart: if ((flags & DCACHE_REFERENCED) && (dentry->d_flags & DCACHE_REFERENCED)) { dentry->d_flags &= ~DCACHE_REFERENCED; - list_move_tail(&dentry->d_lru, &referenced); + list_move(&dentry->d_lru, &referenced); spin_unlock(&dentry->d_lock); } else { list_move_tail(&dentry->d_lru, &tmp); diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c index 63a4a59e414..c68edb96944 100644 --- a/fs/devpts/inode.c +++ b/fs/devpts/inode.c @@ -90,6 +90,15 @@ static inline struct super_block *pts_sb_from_inode(struct inode *inode) #define PARSE_MOUNT 0 #define PARSE_REMOUNT 1 +/* + * parse_mount_options(): + * Set @opts to mount options specified in @data. If an option is not + * specified in @data, set it to its default value. The exception is + * 'newinstance' option which can only be set/cleared on a mount (i.e. + * cannot be changed during remount). + * + * Note: @data may be NULL (in which case all options are set to default). 
+ */ static int parse_mount_options(char *data, int op, struct pts_mount_opts *opts) { char *p; @@ -355,12 +364,9 @@ static int devpts_get_sb(struct file_system_type *fs_type, struct pts_mount_opts opts; struct super_block *s; - memset(&opts, 0, sizeof(opts)); - if (data) { - error = parse_mount_options(data, PARSE_MOUNT, &opts); - if (error) - return error; - } + error = parse_mount_options(data, PARSE_MOUNT, &opts); + if (error) + return error; if (opts.newinstance) s = sget(fs_type, NULL, set_anon_super, NULL); @@ -389,11 +395,10 @@ static int devpts_get_sb(struct file_system_type *fs_type, return 0; out_dput: - dput(s->s_root); + dput(s->s_root); /* undo dget() in simple_set_mnt() */ out_undo_sget: - up_write(&s->s_umount); - deactivate_super(s); + deactivate_locked_super(s); return error; } diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c index ccabd5faa04..9f0aa9883c2 100644 --- a/fs/ecryptfs/main.c +++ b/fs/ecryptfs/main.c @@ -614,9 +614,8 @@ static int ecryptfs_get_sb(struct file_system_type *fs_type, int flags, } goto out; out_abort: - dput(sb->s_root); - up_write(&sb->s_umount); - deactivate_super(sb); + dput(sb->s_root); /* aka mnt->mnt_root, as set by get_sb_nodev() */ + deactivate_locked_super(sb); out: return rc; } diff --git a/fs/eventpoll.c b/fs/eventpoll.c index a89f370fadb..5458e80fc55 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -1212,7 +1212,7 @@ SYSCALL_DEFINE1(epoll_create1, int, flags) SYSCALL_DEFINE1(epoll_create, int, size) { - if (size < 0) + if (size <= 0) return -EINVAL; return sys_epoll_create1(0); diff --git a/fs/exec.c b/fs/exec.c index 639177b0eea..895823d0149 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -105,40 +105,28 @@ static inline void put_binfmt(struct linux_binfmt * fmt) SYSCALL_DEFINE1(uselib, const char __user *, library) { struct file *file; - struct nameidata nd; char *tmp = getname(library); int error = PTR_ERR(tmp); - if (!IS_ERR(tmp)) { - error = path_lookup_open(AT_FDCWD, tmp, - LOOKUP_FOLLOW, &nd, - FMODE_READ|FMODE_EXEC); - putname(tmp); - } - if (error) + if (IS_ERR(tmp)) goto out; - error = -EINVAL; - if (!S_ISREG(nd.path.dentry->d_inode->i_mode)) - goto exit; - - error = -EACCES; - if (nd.path.mnt->mnt_flags & MNT_NOEXEC) - goto exit; - - error = inode_permission(nd.path.dentry->d_inode, - MAY_READ | MAY_EXEC | MAY_OPEN); - if (error) - goto exit; - error = ima_path_check(&nd.path, MAY_READ | MAY_EXEC | MAY_OPEN); - if (error) - goto exit; - - file = nameidata_to_filp(&nd, O_RDONLY|O_LARGEFILE); + file = do_filp_open(AT_FDCWD, tmp, + O_LARGEFILE | O_RDONLY | FMODE_EXEC, 0, + MAY_READ | MAY_EXEC | MAY_OPEN); + putname(tmp); error = PTR_ERR(file); if (IS_ERR(file)) goto out; + error = -EINVAL; + if (!S_ISREG(file->f_path.dentry->d_inode->i_mode)) + goto exit; + + error = -EACCES; + if (file->f_path.mnt->mnt_flags & MNT_NOEXEC) + goto exit; + fsnotify_open(file->f_path.dentry); error = -ENOEXEC; @@ -160,13 +148,10 @@ SYSCALL_DEFINE1(uselib, const char __user *, library) } read_unlock(&binfmt_lock); } +exit: fput(file); out: return error; -exit: - release_open_intent(&nd); - path_put(&nd.path); - goto out; } #ifdef CONFIG_MMU @@ -661,47 +646,33 @@ EXPORT_SYMBOL(setup_arg_pages); struct file *open_exec(const char *name) { - struct nameidata nd; struct file *file; int err; - err = path_lookup_open(AT_FDCWD, name, LOOKUP_FOLLOW, &nd, - FMODE_READ|FMODE_EXEC); - if (err) + file = do_filp_open(AT_FDCWD, name, + O_LARGEFILE | O_RDONLY | FMODE_EXEC, 0, + MAY_EXEC | MAY_OPEN); + if (IS_ERR(file)) goto out; err = -EACCES; - if 
(!S_ISREG(nd.path.dentry->d_inode->i_mode))
- goto out_path_put;
+ if (!S_ISREG(file->f_path.dentry->d_inode->i_mode))
+ goto exit;
- if (nd.path.mnt->mnt_flags & MNT_NOEXEC)
- goto out_path_put;
-
- err = inode_permission(nd.path.dentry->d_inode, MAY_EXEC | MAY_OPEN);
- if (err)
- goto out_path_put;
- err = ima_path_check(&nd.path, MAY_EXEC | MAY_OPEN);
- if (err)
- goto out_path_put;
-
- file = nameidata_to_filp(&nd, O_RDONLY|O_LARGEFILE);
- if (IS_ERR(file))
- return file;
+ if (file->f_path.mnt->mnt_flags & MNT_NOEXEC)
+ goto exit;
 fsnotify_open(file->f_path.dentry);
 err = deny_write_access(file);
- if (err) {
- fput(file);
- goto out;
- }
+ if (err)
+ goto exit;
+out:
 return file;
- out_path_put:
- release_open_intent(&nd);
- path_put(&nd.path);
- out:
+exit:
+ fput(file);
 return ERR_PTR(err);
 }
 EXPORT_SYMBOL(open_exec);
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index e4033215834..e3a55eb8b26 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -1841,11 +1841,13 @@ ext4_ext_put_in_cache(struct inode *inode, ext4_lblk_t block,
 {
 struct ext4_ext_cache *cex;
 BUG_ON(len == 0);
+ spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
 cex = &EXT4_I(inode)->i_cached_extent;
 cex->ec_type = type;
 cex->ec_block = block;
 cex->ec_len = len;
 cex->ec_start = start;
+ spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
 }
 /*
@@ -1902,12 +1904,17 @@ ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block,
 struct ext4_extent *ex)
 {
 struct ext4_ext_cache *cex;
+ int ret = EXT4_EXT_CACHE_NO;
+ /*
+ * We borrow i_block_reservation_lock to protect i_cached_extent
+ */
+ spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
 cex = &EXT4_I(inode)->i_cached_extent;
 /* has cache valid data? */
 if (cex->ec_type == EXT4_EXT_CACHE_NO)
- return EXT4_EXT_CACHE_NO;
+ goto errout;
 BUG_ON(cex->ec_type != EXT4_EXT_CACHE_GAP &&
 cex->ec_type != EXT4_EXT_CACHE_EXTENT);
@@ -1918,11 +1925,11 @@ ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block,
 ext_debug("%u cached by %u:%u:%llu\n", block, cex->ec_block, cex->ec_len, cex->ec_start);
- return cex->ec_type;
+ ret = cex->ec_type;
 }
-
- /* not in cache */
- return EXT4_EXT_CACHE_NO;
+errout:
+ spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
+ return ret;
 }
 /*
@@ -2875,6 +2882,8 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
 if (allocated > max_blocks)
 allocated = max_blocks;
 set_buffer_unwritten(bh_result);
+ bh_result->b_bdev = inode->i_sb->s_bdev;
+ bh_result->b_blocknr = newblock;
 goto out2;
 }
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index e91f978c7f1..2a9ffd528dd 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -1149,6 +1149,7 @@ int ext4_get_blocks_wrap(handle_t *handle, struct inode *inode, sector_t block,
 int retval;
 clear_buffer_mapped(bh);
+ clear_buffer_unwritten(bh);
 /*
 * Try to see if we can get the block without requesting
@@ -1178,6 +1179,18 @@ int ext4_get_blocks_wrap(handle_t *handle, struct inode *inode, sector_t block,
 if (retval > 0 && buffer_mapped(bh))
 return retval;
+ /*
+ * When we call get_blocks without the create flag, the
+ * BH_Unwritten flag could have gotten set if the blocks
+ * requested were part of an uninitialized extent. We need to
+ * clear this flag now that we are committed to convert all or
+ * part of the uninitialized extent to be an initialized
+ * extent. This is because we need to avoid the combination
+ * of BH_Unwritten and BH_Mapped flags being simultaneously
+ * set on the buffer_head.
+ */
+ clear_buffer_unwritten(bh);
+
 /*
 * New blocks allocate and/or writing to uninitialized extent
 * will possibly result in updating i_data, so we take
@@ -2297,6 +2310,10 @@ static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
 struct buffer_head *bh_result, int create)
 {
 int ret = 0;
+ sector_t invalid_block = ~((sector_t) 0xffff);
+
+ if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es))
+ invalid_block = ~0;
 BUG_ON(create == 0);
 BUG_ON(bh_result->b_size != inode->i_sb->s_blocksize);
@@ -2318,11 +2335,18 @@ static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
 /* not enough space to reserve */
 return ret;
- map_bh(bh_result, inode->i_sb, 0);
+ map_bh(bh_result, inode->i_sb, invalid_block);
 set_buffer_new(bh_result);
 set_buffer_delay(bh_result);
 } else if (ret > 0) {
 bh_result->b_size = (ret << inode->i_blkbits);
+ /*
+ * With sub-block writes into unwritten extents
+ * we also need to mark the buffer as new so that
+ * the unwritten parts of the buffer get correctly zeroed.
+ */
+ if (buffer_unwritten(bh_result))
+ set_buffer_new(bh_result);
 ret = 0;
 }
diff --git a/fs/fcntl.c b/fs/fcntl.c
index cc8e4de2fee..1ad703150de 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -117,11 +117,13 @@ SYSCALL_DEFINE2(dup2, unsigned int, oldfd, unsigned int, newfd)
 {
 if (unlikely(newfd == oldfd)) { /* corner case */
 struct files_struct *files = current->files;
+ int retval = oldfd;
+
 rcu_read_lock();
 if (!fcheck_files(files, oldfd))
- oldfd = -EBADF;
+ retval = -EBADF;
 rcu_read_unlock();
- return oldfd;
+ return retval;
 }
 return sys_dup3(oldfd, newfd, 0);
 }
diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
index e0cbd16f6dc..1c341304621 100644
--- a/fs/fscache/internal.h
+++ b/fs/fscache/internal.h
@@ -28,7 +28,7 @@
 #define FSCACHE_MAX_THREADS 32
 /*
- * fsc-cache.c
+ * cache.c
 */
 extern struct list_head fscache_cache_list;
 extern struct rw_semaphore fscache_addremove_sem;
@@ -37,7 +37,7 @@ extern struct fscache_cache *fscache_select_cache_for_object(
 struct fscache_cookie *);
 /*
- * fsc-cookie.c
+ * cookie.c
 */
 extern struct kmem_cache *fscache_cookie_jar;
@@ -45,13 +45,13 @@ extern void fscache_cookie_init_once(void *);
 extern void __fscache_cookie_put(struct fscache_cookie *);
 /*
- * fsc-fsdef.c
+ * fsdef.c
 */
 extern struct fscache_cookie fscache_fsdef_index;
 extern struct fscache_cookie_def fscache_fsdef_netfs_def;
 /*
- * fsc-histogram.c
+ * histogram.c
 */
 #ifdef CONFIG_FSCACHE_HISTOGRAM
 extern atomic_t fscache_obj_instantiate_histogram[HZ];
@@ -75,7 +75,7 @@ extern const struct file_operations fscache_histogram_fops;
 #endif
 /*
- * fsc-main.c
+ * main.c
 */
 extern unsigned fscache_defer_lookup;
 extern unsigned fscache_defer_create;
@@ -86,14 +86,14 @@ extern int fscache_wait_bit(void *);
 extern int fscache_wait_bit_interruptible(void *);
 /*
- * fsc-object.c
+ * object.c
 */
 extern void fscache_withdrawing_object(struct fscache_cache *,
 struct fscache_object *);
 extern void fscache_enqueue_object(struct fscache_object *);
 /*
- * fsc-operation.c
+ * operation.c
 */
 extern int fscache_submit_exclusive_op(struct fscache_object *,
 struct fscache_operation *);
@@ -104,7 +104,7 @@ extern void fscache_start_operations(struct fscache_object *);
 extern void fscache_operation_gc(struct work_struct *);
 /*
- * fsc-proc.c
+ * proc.c
 */
 #ifdef CONFIG_PROC_FS
 extern int __init fscache_proc_init(void);
@@ -115,7 +115,7 @@ extern void fscache_proc_cleanup(void);
 #endif
 /*
- * fsc-stats.c
+ * stats.c
 */
 #ifdef CONFIG_FSCACHE_STATS
 extern atomic_t
fscache_n_ops_processed[FSCACHE_MAX_THREADS]; diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index 459b73dd45e..91f7c85f1ff 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c @@ -19,6 +19,7 @@ #include #include #include +#include MODULE_AUTHOR("Miklos Szeredi "); MODULE_DESCRIPTION("Filesystem in Userspace"); @@ -259,7 +260,9 @@ struct inode *fuse_iget(struct super_block *sb, u64 nodeid, static void fuse_umount_begin(struct super_block *sb) { + lock_kernel(); fuse_abort_conn(get_fuse_conn_super(sb)); + unlock_kernel(); } static void fuse_send_destroy(struct fuse_conn *fc) @@ -908,6 +911,7 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent) err_put_root: dput(root_dentry); err_put_conn: + bdi_destroy(&fc->bdi); fuse_conn_put(fc); err_fput: fput(file); diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c index 1afd9f26bcb..ff498109048 100644 --- a/fs/gfs2/glock.c +++ b/fs/gfs2/glock.c @@ -1304,6 +1304,7 @@ static int gfs2_shrink_glock_memory(int nr, gfp_t gfp_mask) nr--; if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) gfs2_glock_put(gl); + got_ref = 0; } spin_lock(&lru_lock); if (may_demote) diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c index 650a730707b..1ff9473ea75 100644 --- a/fs/gfs2/ops_fstype.c +++ b/fs/gfs2/ops_fstype.c @@ -1282,21 +1282,21 @@ static int gfs2_get_sb(struct file_system_type *fs_type, int flags, static struct super_block *get_gfs2_sb(const char *dev_name) { struct super_block *sb; - struct nameidata nd; + struct path path; int error; - error = path_lookup(dev_name, LOOKUP_FOLLOW, &nd); + error = kern_path(dev_name, LOOKUP_FOLLOW, &path); if (error) { printk(KERN_WARNING "GFS2: path_lookup on %s returned error %d\n", dev_name, error); return NULL; } - sb = nd.path.dentry->d_inode->i_sb; + sb = path.dentry->d_inode->i_sb; if (sb && (sb->s_type == &gfs2_fs_type)) atomic_inc(&sb->s_active); else sb = NULL; - path_put(&nd.path); + path_put(&path); return sb; } diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c index fecf402d7b8..fc77965be84 100644 --- a/fs/hpfs/super.c +++ b/fs/hpfs/super.c @@ -423,8 +423,7 @@ static int hpfs_remount_fs(struct super_block *s, int *flags, char *data) if (!(*flags & MS_RDONLY)) mark_dirty(s); - kfree(s->s_options); - s->s_options = new_opts; + replace_mount_options(s, new_opts); return 0; diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index 153d9681192..c1462d43e72 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -312,16 +312,6 @@ out: return retval; } -/* - * Read a page. Again trivial. If it didn't already exist - * in the page cache, it is zero-filled. 
- */ -static int hugetlbfs_readpage(struct file *file, struct page * page) -{ - unlock_page(page); - return -EINVAL; -} - static int hugetlbfs_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, @@ -701,7 +691,6 @@ static void hugetlbfs_destroy_inode(struct inode *inode) } static const struct address_space_operations hugetlbfs_aops = { - .readpage = hugetlbfs_readpage, .write_begin = hugetlbfs_write_begin, .write_end = hugetlbfs_write_end, .set_page_dirty = hugetlbfs_set_page_dirty, diff --git a/fs/inode.c b/fs/inode.c index 6ad14a1cd8c..bca0c618fdb 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -99,7 +99,7 @@ static DEFINE_MUTEX(iprune_mutex); */ struct inodes_stat_t inodes_stat; -static struct kmem_cache * inode_cachep __read_mostly; +static struct kmem_cache *inode_cachep __read_mostly; static void wake_up_inode(struct inode *inode) { @@ -124,7 +124,7 @@ struct inode *inode_init_always(struct super_block *sb, struct inode *inode) static struct inode_operations empty_iops; static const struct file_operations empty_fops; - struct address_space * const mapping = &inode->i_data; + struct address_space *const mapping = &inode->i_data; inode->i_sb = sb; inode->i_blkbits = sb->s_blocksize_bits; @@ -216,9 +216,10 @@ static struct inode *alloc_inode(struct super_block *sb) return NULL; } -void destroy_inode(struct inode *inode) +void destroy_inode(struct inode *inode) { BUG_ON(inode_has_buffers(inode)); + ima_inode_free(inode); security_inode_free(inode); if (inode->i_sb->s_op->destroy_inode) inode->i_sb->s_op->destroy_inode(inode); @@ -252,12 +253,11 @@ void inode_init_once(struct inode *inode) mutex_init(&inode->inotify_mutex); #endif } - EXPORT_SYMBOL(inode_init_once); static void init_once(void *foo) { - struct inode * inode = (struct inode *) foo; + struct inode *inode = (struct inode *) foo; inode_init_once(inode); } @@ -265,7 +265,7 @@ static void init_once(void *foo) /* * inode_lock must be held */ -void __iget(struct inode * inode) +void __iget(struct inode *inode) { if (atomic_read(&inode->i_count)) { atomic_inc(&inode->i_count); @@ -289,7 +289,7 @@ void clear_inode(struct inode *inode) { might_sleep(); invalidate_inode_buffers(inode); - + BUG_ON(inode->i_data.nrpages); BUG_ON(!(inode->i_state & I_FREEING)); BUG_ON(inode->i_state & I_CLEAR); @@ -303,7 +303,6 @@ void clear_inode(struct inode *inode) cd_forget(inode); inode->i_state = I_CLEAR; } - EXPORT_SYMBOL(clear_inode); /* @@ -351,8 +350,8 @@ static int invalidate_list(struct list_head *head, struct list_head *dispose) next = head->next; for (;;) { - struct list_head * tmp = next; - struct inode * inode; + struct list_head *tmp = next; + struct inode *inode; /* * We can reschedule here without worrying about the list's @@ -391,7 +390,7 @@ static int invalidate_list(struct list_head *head, struct list_head *dispose) * fails because there are busy inodes then a non zero value is returned. * If the discard is successful all the inodes have been discarded. */ -int invalidate_inodes(struct super_block * sb) +int invalidate_inodes(struct super_block *sb) { int busy; LIST_HEAD(throw_away); @@ -407,7 +406,6 @@ int invalidate_inodes(struct super_block * sb) return busy; } - EXPORT_SYMBOL(invalidate_inodes); static int can_unuse(struct inode *inode) @@ -504,7 +502,7 @@ static int shrink_icache_memory(int nr, gfp_t gfp_mask) * Nasty deadlock avoidance. We may hold various FS locks, * and we don't want to recurse into the FS that called us * in clear_inode() and friends.. 
- */ + */ if (!(gfp_mask & __GFP_FS)) return -1; prune_icache(nr); @@ -524,10 +522,13 @@ static void __wait_on_freeing_inode(struct inode *inode); * by hand after calling find_inode now! This simplifies iunique and won't * add any additional branch in the common code. */ -static struct inode * find_inode(struct super_block * sb, struct hlist_head *head, int (*test)(struct inode *, void *), void *data) +static struct inode *find_inode(struct super_block *sb, + struct hlist_head *head, + int (*test)(struct inode *, void *), + void *data) { struct hlist_node *node; - struct inode * inode = NULL; + struct inode *inode = NULL; repeat: hlist_for_each_entry(inode, node, head, i_hash) { @@ -548,10 +549,11 @@ repeat: * find_inode_fast is the fast path version of find_inode, see the comment at * iget_locked for details. */ -static struct inode * find_inode_fast(struct super_block * sb, struct hlist_head *head, unsigned long ino) +static struct inode *find_inode_fast(struct super_block *sb, + struct hlist_head *head, unsigned long ino) { struct hlist_node *node; - struct inode * inode = NULL; + struct inode *inode = NULL; repeat: hlist_for_each_entry(inode, node, head, i_hash) { @@ -631,10 +633,10 @@ struct inode *new_inode(struct super_block *sb) * here to attempt to avoid that. */ static unsigned int last_ino; - struct inode * inode; + struct inode *inode; spin_lock_prefetch(&inode_lock); - + inode = alloc_inode(sb); if (inode) { spin_lock(&inode_lock); @@ -645,7 +647,6 @@ struct inode *new_inode(struct super_block *sb) } return inode; } - EXPORT_SYMBOL(new_inode); void unlock_new_inode(struct inode *inode) @@ -674,7 +675,6 @@ void unlock_new_inode(struct inode *inode) inode->i_state &= ~(I_LOCK|I_NEW); wake_up_inode(inode); } - EXPORT_SYMBOL(unlock_new_inode); /* @@ -683,13 +683,17 @@ EXPORT_SYMBOL(unlock_new_inode); * We no longer cache the sb_flags in i_flags - see fs.h * -- rmk@arm.uk.linux.org */ -static struct inode * get_new_inode(struct super_block *sb, struct hlist_head *head, int (*test)(struct inode *, void *), int (*set)(struct inode *, void *), void *data) +static struct inode *get_new_inode(struct super_block *sb, + struct hlist_head *head, + int (*test)(struct inode *, void *), + int (*set)(struct inode *, void *), + void *data) { - struct inode * inode; + struct inode *inode; inode = alloc_inode(sb); if (inode) { - struct inode * old; + struct inode *old; spin_lock(&inode_lock); /* We released the lock, so.. */ @@ -731,13 +735,14 @@ set_failed: * get_new_inode_fast is the fast path version of get_new_inode, see the * comment at iget_locked for details. */ -static struct inode * get_new_inode_fast(struct super_block *sb, struct hlist_head *head, unsigned long ino) +static struct inode *get_new_inode_fast(struct super_block *sb, + struct hlist_head *head, unsigned long ino) { - struct inode * inode; + struct inode *inode; inode = alloc_inode(sb); if (inode) { - struct inode * old; + struct inode *old; spin_lock(&inode_lock); /* We released the lock, so.. 
*/ @@ -823,7 +828,6 @@ struct inode *igrab(struct inode *inode) spin_unlock(&inode_lock); return inode; } - EXPORT_SYMBOL(igrab); /** @@ -924,7 +928,6 @@ struct inode *ilookup5_nowait(struct super_block *sb, unsigned long hashval, return ifind(sb, head, test, data, 0); } - EXPORT_SYMBOL(ilookup5_nowait); /** @@ -953,7 +956,6 @@ struct inode *ilookup5(struct super_block *sb, unsigned long hashval, return ifind(sb, head, test, data, 1); } - EXPORT_SYMBOL(ilookup5); /** @@ -976,7 +978,6 @@ struct inode *ilookup(struct super_block *sb, unsigned long ino) return ifind_fast(sb, head, ino); } - EXPORT_SYMBOL(ilookup); /** @@ -1015,7 +1016,6 @@ struct inode *iget5_locked(struct super_block *sb, unsigned long hashval, */ return get_new_inode(sb, head, test, set, data); } - EXPORT_SYMBOL(iget5_locked); /** @@ -1047,7 +1047,6 @@ struct inode *iget_locked(struct super_block *sb, unsigned long ino) */ return get_new_inode_fast(sb, head, ino); } - EXPORT_SYMBOL(iget_locked); int insert_inode_locked(struct inode *inode) @@ -1055,13 +1054,22 @@ int insert_inode_locked(struct inode *inode) struct super_block *sb = inode->i_sb; ino_t ino = inode->i_ino; struct hlist_head *head = inode_hashtable + hash(sb, ino); - struct inode *old; inode->i_state |= I_LOCK|I_NEW; while (1) { + struct hlist_node *node; + struct inode *old = NULL; spin_lock(&inode_lock); - old = find_inode_fast(sb, head, ino); - if (likely(!old)) { + hlist_for_each_entry(old, node, head, i_hash) { + if (old->i_ino != ino) + continue; + if (old->i_sb != sb) + continue; + if (old->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE)) + continue; + break; + } + if (likely(!node)) { hlist_add_head(&inode->i_hash, head); spin_unlock(&inode_lock); return 0; @@ -1076,7 +1084,6 @@ int insert_inode_locked(struct inode *inode) iput(old); } } - EXPORT_SYMBOL(insert_inode_locked); int insert_inode_locked4(struct inode *inode, unsigned long hashval, @@ -1084,14 +1091,24 @@ int insert_inode_locked4(struct inode *inode, unsigned long hashval, { struct super_block *sb = inode->i_sb; struct hlist_head *head = inode_hashtable + hash(sb, hashval); - struct inode *old; inode->i_state |= I_LOCK|I_NEW; while (1) { + struct hlist_node *node; + struct inode *old = NULL; + spin_lock(&inode_lock); - old = find_inode(sb, head, test, data); - if (likely(!old)) { + hlist_for_each_entry(old, node, head, i_hash) { + if (old->i_sb != sb) + continue; + if (!test(old, data)) + continue; + if (old->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE)) + continue; + break; + } + if (likely(!node)) { hlist_add_head(&inode->i_hash, head); spin_unlock(&inode_lock); return 0; @@ -1106,7 +1123,6 @@ int insert_inode_locked4(struct inode *inode, unsigned long hashval, iput(old); } } - EXPORT_SYMBOL(insert_inode_locked4); /** @@ -1124,7 +1140,6 @@ void __insert_inode_hash(struct inode *inode, unsigned long hashval) hlist_add_head(&inode->i_hash, head); spin_unlock(&inode_lock); } - EXPORT_SYMBOL(__insert_inode_hash); /** @@ -1139,7 +1154,6 @@ void remove_inode_hash(struct inode *inode) hlist_del_init(&inode->i_hash); spin_unlock(&inode_lock); } - EXPORT_SYMBOL(remove_inode_hash); /* @@ -1187,7 +1201,6 @@ void generic_delete_inode(struct inode *inode) BUG_ON(inode->i_state != I_CLEAR); destroy_inode(inode); } - EXPORT_SYMBOL(generic_delete_inode); static void generic_forget_inode(struct inode *inode) @@ -1237,12 +1250,11 @@ void generic_drop_inode(struct inode *inode) else generic_forget_inode(inode); } - EXPORT_SYMBOL_GPL(generic_drop_inode); /* * Called when we're dropping the last reference - * to an 
inode. + * to an inode. * * Call the FS "drop()" function, defaulting to * the legacy UNIX filesystem behaviour.. @@ -1262,7 +1274,7 @@ static inline void iput_final(struct inode *inode) } /** - * iput - put an inode + * iput - put an inode * @inode: inode to put * * Puts an inode, dropping its usage count. If the inode use count hits @@ -1279,7 +1291,6 @@ void iput(struct inode *inode) iput_final(inode); } } - EXPORT_SYMBOL(iput); /** @@ -1290,10 +1301,10 @@ EXPORT_SYMBOL(iput); * Returns the block number on the device holding the inode that * is the disk block number for the block of the file requested. * That is, asked for block 4 of inode 1 the function will return the - * disk block relative to the disk start that holds that block of the + * disk block relative to the disk start that holds that block of the * file. */ -sector_t bmap(struct inode * inode, sector_t block) +sector_t bmap(struct inode *inode, sector_t block) { sector_t res = 0; if (inode->i_mapping->a_ops->bmap) @@ -1425,7 +1436,6 @@ void file_update_time(struct file *file) mark_inode_dirty_sync(inode); mnt_drop_write(file->f_path.mnt); } - EXPORT_SYMBOL(file_update_time); int inode_needs_sync(struct inode *inode) @@ -1436,7 +1446,6 @@ int inode_needs_sync(struct inode *inode) return 1; return 0; } - EXPORT_SYMBOL(inode_needs_sync); int inode_wait(void *word) diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c index 06560c520f4..618e21c0b7a 100644 --- a/fs/jbd/commit.c +++ b/fs/jbd/commit.c @@ -241,7 +241,7 @@ write_out_data: spin_lock(&journal->j_list_lock); } /* Someone already cleaned up the buffer? */ - if (!buffer_jbd(bh) + if (!buffer_jbd(bh) || bh2jh(bh) != jh || jh->b_transaction != commit_transaction || jh->b_jlist != BJ_SyncData) { jbd_unlock_bh_state(bh); @@ -478,7 +478,9 @@ void journal_commit_transaction(journal_t *journal) spin_lock(&journal->j_list_lock); continue; } - if (buffer_jbd(bh) && jh->b_jlist == BJ_Locked) { + if (buffer_jbd(bh) && bh2jh(bh) == jh && + jh->b_transaction == commit_transaction && + jh->b_jlist == BJ_Locked) { __journal_unfile_buffer(jh); jbd_unlock_bh_state(bh); journal_remove_journal_head(bh); diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c index c32b4a1ad6c..a0244740b75 100644 --- a/fs/jffs2/erase.c +++ b/fs/jffs2/erase.c @@ -480,13 +480,6 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb return; filebad: - mutex_lock(&c->erase_free_sem); - spin_lock(&c->erase_completion_lock); - /* Stick it on a list (any list) so erase_failed can take it - right off again. Silly, but shouldn't happen often. 
*/ - list_move(&jeb->list, &c->erasing_list); - spin_unlock(&c->erase_completion_lock); - mutex_unlock(&c->erase_free_sem); jffs2_erase_failed(c, jeb, bad_offset); return; diff --git a/fs/libfs.c b/fs/libfs.c index cd223190c4e..80046ddf506 100644 --- a/fs/libfs.c +++ b/fs/libfs.c @@ -246,8 +246,7 @@ int get_sb_pseudo(struct file_system_type *fs_type, char *name, return 0; Enomem: - up_write(&s->s_umount); - deactivate_super(s); + deactivate_locked_super(s); return -ENOMEM; } diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c index abf83881f68..1a54ae14a19 100644 --- a/fs/lockd/svc.c +++ b/fs/lockd/svc.c @@ -104,6 +104,16 @@ static void set_grace_period(void) schedule_delayed_work(&grace_period_end, grace_period); } +static void restart_grace(void) +{ + if (nlmsvc_ops) { + cancel_delayed_work_sync(&grace_period_end); + locks_end_grace(&lockd_manager); + nlmsvc_invalidate_all(); + set_grace_period(); + } +} + /* * This is the lockd kernel thread */ @@ -149,10 +159,7 @@ lockd(void *vrqstp) if (signalled()) { flush_signals(current); - if (nlmsvc_ops) { - nlmsvc_invalidate_all(); - set_grace_period(); - } + restart_grace(); continue; } diff --git a/fs/namei.c b/fs/namei.c index 78f253cd2d4..967c3db9272 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -1130,8 +1130,8 @@ int vfs_path_lookup(struct dentry *dentry, struct vfsmount *mnt, * @nd: pointer to nameidata * @open_flags: open intent flags */ -int path_lookup_open(int dfd, const char *name, unsigned int lookup_flags, - struct nameidata *nd, int open_flags) +static int path_lookup_open(int dfd, const char *name, + unsigned int lookup_flags, struct nameidata *nd, int open_flags) { struct file *filp = get_empty_filp(); int err; @@ -1637,18 +1637,19 @@ static int open_will_write_to_fs(int flag, struct inode *inode) * open_to_namei_flags() for more details. 
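/*
 * [Editorial sketch, not part of the patch.]  The libfs hunk above is the
 * first of many in this series (procfs, NFS, UBIFS, fs/super.c callers)
 * that replace the open-coded "up_write(&s->s_umount); deactivate_super(s);"
 * error exit with the deactivate_locked_super() helper added later in the
 * fs/super.c hunk.  A hypothetical ->get_sb() error path using it could
 * look like the fragment below; myfs_fill_super() and the other myfs_*
 * names are assumptions, not part of the patch.
 */
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/err.h>

static int myfs_fill_super(struct super_block *sb, void *data, int silent);
					/* assumed to exist elsewhere */

static int myfs_get_sb(struct file_system_type *fs_type, int flags,
		       const char *dev_name, void *data, struct vfsmount *mnt)
{
	struct super_block *s;
	int err;

	s = sget(fs_type, NULL, set_anon_super, NULL);
	if (IS_ERR(s))
		return PTR_ERR(s);

	s->s_flags = flags;
	err = myfs_fill_super(s, data, flags & MS_SILENT ? 1 : 0);
	if (err) {
		/* s_umount is still write-locked here; drop the lock and the
		 * active reference in one go, as the converted callers do */
		deactivate_locked_super(s);
		return err;
	}
	s->s_flags |= MS_ACTIVE;
	simple_set_mnt(mnt, s);
	return 0;
}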
*/ struct file *do_filp_open(int dfd, const char *pathname, - int open_flag, int mode) + int open_flag, int mode, int acc_mode) { struct file *filp; struct nameidata nd; - int acc_mode, error; + int error; struct path path; struct dentry *dir; int count = 0; int will_write; int flag = open_to_namei_flags(open_flag); - acc_mode = MAY_OPEN | ACC_MODE(flag); + if (!acc_mode) + acc_mode = MAY_OPEN | ACC_MODE(flag); /* O_TRUNC implies we need access checks for write permissions */ if (flag & O_TRUNC) @@ -1869,7 +1870,7 @@ do_link: */ struct file *filp_open(const char *filename, int flags, int mode) { - return do_filp_open(AT_FDCWD, filename, flags, mode); + return do_filp_open(AT_FDCWD, filename, flags, mode, 0); } EXPORT_SYMBOL(filp_open); diff --git a/fs/namespace.c b/fs/namespace.c index 41196209a90..134d494158d 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -695,12 +695,16 @@ static inline void mangle(struct seq_file *m, const char *s) */ int generic_show_options(struct seq_file *m, struct vfsmount *mnt) { - const char *options = mnt->mnt_sb->s_options; + const char *options; + + rcu_read_lock(); + options = rcu_dereference(mnt->mnt_sb->s_options); if (options != NULL && options[0]) { seq_putc(m, ','); mangle(m, options); } + rcu_read_unlock(); return 0; } @@ -721,11 +725,22 @@ EXPORT_SYMBOL(generic_show_options); */ void save_mount_options(struct super_block *sb, char *options) { - kfree(sb->s_options); - sb->s_options = kstrdup(options, GFP_KERNEL); + BUG_ON(sb->s_options); + rcu_assign_pointer(sb->s_options, kstrdup(options, GFP_KERNEL)); } EXPORT_SYMBOL(save_mount_options); +void replace_mount_options(struct super_block *sb, char *options) +{ + char *old = sb->s_options; + rcu_assign_pointer(sb->s_options, options); + if (old) { + synchronize_rcu(); + kfree(old); + } +} +EXPORT_SYMBOL(replace_mount_options); + #ifdef CONFIG_PROC_FS /* iterator */ static void *m_start(struct seq_file *m, loff_t *pos) @@ -1073,9 +1088,7 @@ static int do_umount(struct vfsmount *mnt, int flags) */ if (flags & MNT_FORCE && sb->s_op->umount_begin) { - lock_kernel(); sb->s_op->umount_begin(sb); - unlock_kernel(); } /* diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 370b190a09d..89f98e9a024 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -1943,7 +1943,8 @@ int nfs_permission(struct inode *inode, int mask) case S_IFREG: /* NFSv4 has atomic_open... */ if (nfs_server_capable(inode, NFS_CAP_ATOMIC_OPEN) - && (mask & MAY_OPEN)) + && (mask & MAY_OPEN) + && !(mask & MAY_EXEC)) goto out; break; case S_IFDIR: diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index a4d24268029..4674f8092da 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -2594,12 +2594,9 @@ static void nfs4_renew_done(struct rpc_task *task, void *data) unsigned long timestamp = (unsigned long)data; if (task->tk_status < 0) { - switch (task->tk_status) { - case -NFS4ERR_STALE_CLIENTID: - case -NFS4ERR_EXPIRED: - case -NFS4ERR_CB_PATH_DOWN: - nfs4_schedule_state_recovery(clp); - } + /* Unless we're shutting down, schedule state recovery! 
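/*
 * [Editorial sketch, not part of the patch.]  The fs/namespace.c hunk
 * above turns sb->s_options into an RCU-protected string: readers take
 * rcu_read_lock() around rcu_dereference(), while the updater publishes a
 * new string with rcu_assign_pointer() and frees the old one only after
 * synchronize_rcu().  A minimal stand-alone version of that pattern, using
 * a hypothetical global "banner" string, could read as follows; updaters
 * are assumed to be serialized by the caller, as with remount.
 */
#include <linux/rcupdate.h>
#include <linux/seq_file.h>
#include <linux/slab.h>

static char *banner;	/* RCU-protected; may be replaced at any time */

static void banner_show(struct seq_file *m)
{
	const char *s;

	rcu_read_lock();
	s = rcu_dereference(banner);	/* stays valid until rcu_read_unlock() */
	if (s)
		seq_puts(m, s);
	rcu_read_unlock();
}

static void banner_replace(char *new)
{
	char *old = banner;		/* plain read: updaters are serialized */

	rcu_assign_pointer(banner, new);	/* publish the new string */
	if (old) {
		synchronize_rcu();	/* wait out readers still using 'old' */
		kfree(old);
	}
}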
*/ + if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) != 0) + nfs4_schedule_state_recovery(clp); return; } spin_lock(&clp->cl_lock); diff --git a/fs/nfs/nfsroot.c b/fs/nfs/nfsroot.c index d9ef602fbc5..e3ed5908820 100644 --- a/fs/nfs/nfsroot.c +++ b/fs/nfs/nfsroot.c @@ -129,7 +129,7 @@ enum { Opt_err }; -static match_table_t __initconst tokens = { +static const match_table_t tokens __initconst = { {Opt_port, "port=%u"}, {Opt_rsize, "rsize=%u"}, {Opt_wsize, "wsize=%u"}, diff --git a/fs/nfs/super.c b/fs/nfs/super.c index 6717200923f..d2d67781c57 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c @@ -683,9 +683,12 @@ static int nfs_show_stats(struct seq_file *m, struct vfsmount *mnt) */ static void nfs_umount_begin(struct super_block *sb) { - struct nfs_server *server = NFS_SB(sb); + struct nfs_server *server; struct rpc_clnt *rpc; + lock_kernel(); + + server = NFS_SB(sb); /* -EIO all pending I/O */ rpc = server->client_acl; if (!IS_ERR(rpc)) @@ -693,6 +696,8 @@ static void nfs_umount_begin(struct super_block *sb) rpc = server->client; if (!IS_ERR(rpc)) rpc_killall_tasks(rpc); + + unlock_kernel(); } /* @@ -2106,8 +2111,7 @@ out_err_nosb: error_splat_root: dput(mntroot); error_splat_super: - up_write(&s->s_umount); - deactivate_super(s); + deactivate_locked_super(s); goto out; } @@ -2203,8 +2207,7 @@ out_err_noserver: return error; error_splat_super: - up_write(&s->s_umount); - deactivate_super(s); + deactivate_locked_super(s); dprintk("<-- nfs_xdev_get_sb() = %d [splat]\n", error); return error; } @@ -2464,8 +2467,7 @@ out_free: error_splat_root: dput(mntroot); error_splat_super: - up_write(&s->s_umount); - deactivate_super(s); + deactivate_locked_super(s); goto out; } @@ -2559,8 +2561,7 @@ out_err_noserver: return error; error_splat_super: - up_write(&s->s_umount); - deactivate_super(s); + deactivate_locked_super(s); dprintk("<-- nfs4_xdev_get_sb() = %d [splat]\n", error); return error; } @@ -2644,8 +2645,7 @@ out_err_noserver: return error; error_splat_super: - up_write(&s->s_umount); - deactivate_super(s); + deactivate_locked_super(s); dprintk("<-- nfs4_referral_get_sb() = %d [splat]\n", error); return error; } diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c index 5275097a756..b5348405046 100644 --- a/fs/nfsd/nfs4recover.c +++ b/fs/nfsd/nfs4recover.c @@ -229,7 +229,7 @@ nfsd4_list_rec_dir(struct dentry *dir, recdir_func *f) goto out; status = vfs_readdir(filp, nfsd4_build_namelist, &names); fput(filp); - mutex_lock(&dir->d_inode->i_mutex); + mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT); while (!list_empty(&names)) { entry = list_entry(names.next, struct name_list, list); @@ -264,7 +264,7 @@ nfsd4_unlink_clid_dir(char *name, int namlen) dprintk("NFSD: nfsd4_unlink_clid_dir. 
name %.*s\n", namlen, name); - mutex_lock(&rec_dir.dentry->d_inode->i_mutex); + mutex_lock_nested(&rec_dir.dentry->d_inode->i_mutex, I_MUTEX_PARENT); dentry = lookup_one_len(name, rec_dir.dentry, namlen); if (IS_ERR(dentry)) { status = PTR_ERR(dentry); diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index c65a27b76a9..3b711f5147a 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -580,7 +580,6 @@ free_session(struct kref *kref) struct nfsd4_cache_entry *e = &ses->se_slots[i].sl_cache_entry; nfsd4_release_respages(e->ce_respages, e->ce_resused); } - kfree(ses->se_slots); kfree(ses); } diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index b820c311931..b73549d293b 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c @@ -2214,6 +2214,15 @@ nfsd4_encode_dirent_fattr(struct nfsd4_readdir *cd, dentry = lookup_one_len(name, cd->rd_fhp->fh_dentry, namlen); if (IS_ERR(dentry)) return nfserrno(PTR_ERR(dentry)); + if (!dentry->d_inode) { + /* + * nfsd_buffered_readdir drops the i_mutex between + * readdir and calling this callback, leaving a window + * where this directory entry could have gone away. + */ + dput(dentry); + return nfserr_noent; + } exp_get(exp); /* @@ -2276,6 +2285,7 @@ nfsd4_encode_dirent(void *ccdv, const char *name, int namlen, struct nfsd4_readdir *cd = container_of(ccd, struct nfsd4_readdir, common); int buflen; __be32 *p = cd->buffer; + __be32 *cookiep; __be32 nfserr = nfserr_toosmall; /* In nfsv4, "." and ".." never make it onto the wire.. */ @@ -2292,7 +2302,7 @@ nfsd4_encode_dirent(void *ccdv, const char *name, int namlen, goto fail; *p++ = xdr_one; /* mark entry present */ - cd->offset = p; /* remember pointer */ + cookiep = p; p = xdr_encode_hyper(p, NFS_OFFSET_MAX); /* offset of next entry */ p = xdr_encode_array(p, name, namlen); /* name length & name */ @@ -2306,6 +2316,8 @@ nfsd4_encode_dirent(void *ccdv, const char *name, int namlen, goto fail; case nfserr_dropit: goto fail; + case nfserr_noent: + goto skip_entry; default: /* * If the client requested the RDATTR_ERROR attribute, @@ -2324,6 +2336,8 @@ nfsd4_encode_dirent(void *ccdv, const char *name, int namlen, } cd->buflen -= (p - cd->buffer); cd->buffer = p; + cd->offset = cookiep; +skip_entry: cd->common.err = nfs_ok; return 0; fail: diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index 6c68ffd6b4b..b660435978d 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c @@ -1015,6 +1015,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file, host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset); set_fs(oldfs); if (host_err >= 0) { + *cnt = host_err; nfsdstats.io_write += host_err; fsnotify_modify(file->f_path.dentry); } @@ -1060,10 +1061,9 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file, } dprintk("nfsd: write complete host_err=%d\n", host_err); - if (host_err >= 0) { + if (host_err >= 0) err = 0; - *cnt = host_err; - } else + else err = nfserrno(host_err); out: return err; diff --git a/fs/nilfs2/cpfile.c b/fs/nilfs2/cpfile.c index e90b60dfced..300f1cdfa86 100644 --- a/fs/nilfs2/cpfile.c +++ b/fs/nilfs2/cpfile.c @@ -311,7 +311,7 @@ int nilfs_cpfile_delete_checkpoints(struct inode *cpfile, ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh); if (ret < 0) { if (ret != -ENOENT) - goto out_sem; + goto out_header; /* skip hole */ ret = 0; continue; @@ -344,7 +344,7 @@ int nilfs_cpfile_delete_checkpoints(struct inode *cpfile, continue; printk(KERN_ERR "%s: cannot delete block\n", __func__); - goto out_sem; + goto out_header; } } @@ 
-361,6 +361,8 @@ int nilfs_cpfile_delete_checkpoints(struct inode *cpfile, nilfs_mdt_mark_dirty(cpfile); kunmap_atomic(kaddr, KM_USER0); } + + out_header: brelse(header_bh); out_sem: diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c index 108d281ebca..d6759b92006 100644 --- a/fs/nilfs2/ioctl.c +++ b/fs/nilfs2/ioctl.c @@ -25,6 +25,7 @@ #include /* lock_kernel(), unlock_kernel() */ #include /* capable() */ #include /* copy_from_user(), copy_to_user() */ +#include #include #include "nilfs.h" #include "segment.h" @@ -147,29 +148,12 @@ static ssize_t nilfs_ioctl_do_get_cpinfo(struct the_nilfs *nilfs, __u64 *posp, int flags, void *buf, size_t size, size_t nmembs) { - return nilfs_cpfile_get_cpinfo(nilfs->ns_cpfile, posp, flags, buf, - nmembs); -} - -static int nilfs_ioctl_get_cpinfo(struct inode *inode, struct file *filp, - unsigned int cmd, void __user *argp) -{ - struct the_nilfs *nilfs = NILFS_SB(inode->i_sb)->s_nilfs; - struct nilfs_argv argv; int ret; - if (copy_from_user(&argv, argp, sizeof(argv))) - return -EFAULT; - down_read(&nilfs->ns_segctor_sem); - ret = nilfs_ioctl_wrap_copy(nilfs, &argv, _IOC_DIR(cmd), - nilfs_ioctl_do_get_cpinfo); + ret = nilfs_cpfile_get_cpinfo(nilfs->ns_cpfile, posp, flags, buf, + nmembs); up_read(&nilfs->ns_segctor_sem); - if (ret < 0) - return ret; - - if (copy_to_user(argp, &argv, sizeof(argv))) - ret = -EFAULT; return ret; } @@ -195,28 +179,11 @@ static ssize_t nilfs_ioctl_do_get_suinfo(struct the_nilfs *nilfs, __u64 *posp, int flags, void *buf, size_t size, size_t nmembs) { - return nilfs_sufile_get_suinfo(nilfs->ns_sufile, *posp, buf, nmembs); -} - -static int nilfs_ioctl_get_suinfo(struct inode *inode, struct file *filp, - unsigned int cmd, void __user *argp) -{ - struct the_nilfs *nilfs = NILFS_SB(inode->i_sb)->s_nilfs; - struct nilfs_argv argv; int ret; - if (copy_from_user(&argv, argp, sizeof(argv))) - return -EFAULT; - down_read(&nilfs->ns_segctor_sem); - ret = nilfs_ioctl_wrap_copy(nilfs, &argv, _IOC_DIR(cmd), - nilfs_ioctl_do_get_suinfo); + ret = nilfs_sufile_get_suinfo(nilfs->ns_sufile, *posp, buf, nmembs); up_read(&nilfs->ns_segctor_sem); - if (ret < 0) - return ret; - - if (copy_to_user(argp, &argv, sizeof(argv))) - ret = -EFAULT; return ret; } @@ -242,28 +209,11 @@ static ssize_t nilfs_ioctl_do_get_vinfo(struct the_nilfs *nilfs, __u64 *posp, int flags, void *buf, size_t size, size_t nmembs) { - return nilfs_dat_get_vinfo(nilfs_dat_inode(nilfs), buf, nmembs); -} - -static int nilfs_ioctl_get_vinfo(struct inode *inode, struct file *filp, - unsigned int cmd, void __user *argp) -{ - struct the_nilfs *nilfs = NILFS_SB(inode->i_sb)->s_nilfs; - struct nilfs_argv argv; int ret; - if (copy_from_user(&argv, argp, sizeof(argv))) - return -EFAULT; - down_read(&nilfs->ns_segctor_sem); - ret = nilfs_ioctl_wrap_copy(nilfs, &argv, _IOC_DIR(cmd), - nilfs_ioctl_do_get_vinfo); + ret = nilfs_dat_get_vinfo(nilfs_dat_inode(nilfs), buf, nmembs); up_read(&nilfs->ns_segctor_sem); - if (ret < 0) - return ret; - - if (copy_to_user(argp, &argv, sizeof(argv))) - ret = -EFAULT; return ret; } @@ -276,17 +226,21 @@ nilfs_ioctl_do_get_bdescs(struct the_nilfs *nilfs, __u64 *posp, int flags, struct nilfs_bdesc *bdescs = buf; int ret, i; + down_read(&nilfs->ns_segctor_sem); for (i = 0; i < nmembs; i++) { ret = nilfs_bmap_lookup_at_level(bmap, bdescs[i].bd_offset, bdescs[i].bd_level + 1, &bdescs[i].bd_blocknr); if (ret < 0) { - if (ret != -ENOENT) + if (ret != -ENOENT) { + up_read(&nilfs->ns_segctor_sem); return ret; + } bdescs[i].bd_blocknr = 0; } } + 
up_read(&nilfs->ns_segctor_sem); return nmembs; } @@ -300,10 +254,11 @@ static int nilfs_ioctl_get_bdescs(struct inode *inode, struct file *filp, if (copy_from_user(&argv, argp, sizeof(argv))) return -EFAULT; - down_read(&nilfs->ns_segctor_sem); + if (argv.v_size != sizeof(struct nilfs_bdesc)) + return -EINVAL; + ret = nilfs_ioctl_wrap_copy(nilfs, &argv, _IOC_DIR(cmd), nilfs_ioctl_do_get_bdescs); - up_read(&nilfs->ns_segctor_sem); if (ret < 0) return ret; @@ -346,10 +301,10 @@ static int nilfs_ioctl_move_inode_block(struct inode *inode, return 0; } -static ssize_t -nilfs_ioctl_do_move_blocks(struct the_nilfs *nilfs, __u64 *posp, int flags, - void *buf, size_t size, size_t nmembs) +static int nilfs_ioctl_move_blocks(struct the_nilfs *nilfs, + struct nilfs_argv *argv, void *buf) { + size_t nmembs = argv->v_nmembs; struct inode *inode; struct nilfs_vdesc *vdesc; struct buffer_head *bh, *n; @@ -410,19 +365,10 @@ nilfs_ioctl_do_move_blocks(struct the_nilfs *nilfs, __u64 *posp, int flags, return ret; } -static inline int nilfs_ioctl_move_blocks(struct the_nilfs *nilfs, - struct nilfs_argv *argv, - int dir) -{ - return nilfs_ioctl_wrap_copy(nilfs, argv, dir, - nilfs_ioctl_do_move_blocks); -} - -static ssize_t -nilfs_ioctl_do_delete_checkpoints(struct the_nilfs *nilfs, __u64 *posp, - int flags, void *buf, size_t size, - size_t nmembs) +static int nilfs_ioctl_delete_checkpoints(struct the_nilfs *nilfs, + struct nilfs_argv *argv, void *buf) { + size_t nmembs = argv->v_nmembs; struct inode *cpfile = nilfs->ns_cpfile; struct nilfs_period *periods = buf; int ret, i; @@ -436,36 +382,21 @@ nilfs_ioctl_do_delete_checkpoints(struct the_nilfs *nilfs, __u64 *posp, return nmembs; } -static inline int nilfs_ioctl_delete_checkpoints(struct the_nilfs *nilfs, - struct nilfs_argv *argv, - int dir) +static int nilfs_ioctl_free_vblocknrs(struct the_nilfs *nilfs, + struct nilfs_argv *argv, void *buf) { - return nilfs_ioctl_wrap_copy(nilfs, argv, dir, - nilfs_ioctl_do_delete_checkpoints); -} + size_t nmembs = argv->v_nmembs; + int ret; -static ssize_t -nilfs_ioctl_do_free_vblocknrs(struct the_nilfs *nilfs, __u64 *posp, int flags, - void *buf, size_t size, size_t nmembs) -{ - int ret = nilfs_dat_freev(nilfs_dat_inode(nilfs), buf, nmembs); + ret = nilfs_dat_freev(nilfs_dat_inode(nilfs), buf, nmembs); return (ret < 0) ? 
ret : nmembs; } -static inline int nilfs_ioctl_free_vblocknrs(struct the_nilfs *nilfs, - struct nilfs_argv *argv, - int dir) -{ - return nilfs_ioctl_wrap_copy(nilfs, argv, dir, - nilfs_ioctl_do_free_vblocknrs); -} - -static ssize_t -nilfs_ioctl_do_mark_blocks_dirty(struct the_nilfs *nilfs, __u64 *posp, - int flags, void *buf, size_t size, - size_t nmembs) +static int nilfs_ioctl_mark_blocks_dirty(struct the_nilfs *nilfs, + struct nilfs_argv *argv, void *buf) { + size_t nmembs = argv->v_nmembs; struct inode *dat = nilfs_dat_inode(nilfs); struct nilfs_bmap *bmap = NILFS_I(dat)->i_bmap; struct nilfs_bdesc *bdescs = buf; @@ -504,55 +435,37 @@ nilfs_ioctl_do_mark_blocks_dirty(struct the_nilfs *nilfs, __u64 *posp, return nmembs; } -static inline int nilfs_ioctl_mark_blocks_dirty(struct the_nilfs *nilfs, - struct nilfs_argv *argv, - int dir) +static int nilfs_ioctl_free_segments(struct the_nilfs *nilfs, + struct nilfs_argv *argv, void *buf) { - return nilfs_ioctl_wrap_copy(nilfs, argv, dir, - nilfs_ioctl_do_mark_blocks_dirty); -} - -static ssize_t -nilfs_ioctl_do_free_segments(struct the_nilfs *nilfs, __u64 *posp, int flags, - void *buf, size_t size, size_t nmembs) -{ - struct nilfs_sb_info *sbi = nilfs_get_writer(nilfs); + size_t nmembs = argv->v_nmembs; + struct nilfs_sb_info *sbi = nilfs->ns_writer; int ret; - if (unlikely(!sbi)) + if (unlikely(!sbi)) { + /* never happens because called for a writable mount */ + WARN_ON(1); return -EROFS; + } ret = nilfs_segctor_add_segments_to_be_freed( NILFS_SC(sbi), buf, nmembs); - nilfs_put_writer(nilfs); return (ret < 0) ? ret : nmembs; } -static inline int nilfs_ioctl_free_segments(struct the_nilfs *nilfs, - struct nilfs_argv *argv, - int dir) -{ - return nilfs_ioctl_wrap_copy(nilfs, argv, dir, - nilfs_ioctl_do_free_segments); -} - int nilfs_ioctl_prepare_clean_segments(struct the_nilfs *nilfs, - void __user *argp) + struct nilfs_argv *argv, void **kbufs) { - struct nilfs_argv argv[5]; const char *msg; - int dir, ret; + int ret; - if (copy_from_user(argv, argp, sizeof(argv))) - return -EFAULT; - - dir = _IOC_WRITE; - ret = nilfs_ioctl_move_blocks(nilfs, &argv[0], dir); + ret = nilfs_ioctl_move_blocks(nilfs, &argv[0], kbufs[0]); if (ret < 0) { msg = "cannot read source blocks"; goto failed; } - ret = nilfs_ioctl_delete_checkpoints(nilfs, &argv[1], dir); + + ret = nilfs_ioctl_delete_checkpoints(nilfs, &argv[1], kbufs[1]); if (ret < 0) { /* * can safely abort because checkpoints can be removed @@ -561,7 +474,7 @@ int nilfs_ioctl_prepare_clean_segments(struct the_nilfs *nilfs, msg = "cannot delete checkpoints"; goto failed; } - ret = nilfs_ioctl_free_vblocknrs(nilfs, &argv[2], dir); + ret = nilfs_ioctl_free_vblocknrs(nilfs, &argv[2], kbufs[2]); if (ret < 0) { /* * can safely abort because DAT file is updated atomically @@ -570,7 +483,7 @@ int nilfs_ioctl_prepare_clean_segments(struct the_nilfs *nilfs, msg = "cannot delete virtual blocks from DAT file"; goto failed; } - ret = nilfs_ioctl_mark_blocks_dirty(nilfs, &argv[3], dir); + ret = nilfs_ioctl_mark_blocks_dirty(nilfs, &argv[3], kbufs[3]); if (ret < 0) { /* * can safely abort because the operation is nondestructive. @@ -578,7 +491,7 @@ int nilfs_ioctl_prepare_clean_segments(struct the_nilfs *nilfs, msg = "cannot mark copying blocks dirty"; goto failed; } - ret = nilfs_ioctl_free_segments(nilfs, &argv[4], dir); + ret = nilfs_ioctl_free_segments(nilfs, &argv[4], kbufs[4]); if (ret < 0) { /* * can safely abort because this operation is atomic. 
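/*
 * [Editorial sketch, not part of the patch.]  After the conversion above,
 * each nilfs_ioctl_*() stage receives a kernel buffer (kbufs[i]) that the
 * caller has already copied in from the user-supplied nilfs_argv array;
 * the real copy-in loop appears in the nilfs_ioctl_clean_segments() hunk
 * just below.  A simplified stand-alone version of that per-slot copy-in
 * step might look like this.  copy_argv_slot() is an invented helper name,
 * struct nilfs_argv (v_base/v_size/v_nmembs) comes from the nilfs2 headers
 * used above, and the real code additionally bounds v_nmembs before
 * trusting the length multiplication.
 */
#include <linux/err.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>

static void *copy_argv_slot(const struct nilfs_argv *argv, size_t membsz)
{
	void __user *base = (void __user *)(unsigned long)argv->v_base;
	size_t len;
	void *buf;

	if (argv->v_size != membsz)		/* reject unknown record sizes */
		return ERR_PTR(-EINVAL);

	len = argv->v_size * argv->v_nmembs;
	if (len == 0)
		return NULL;			/* empty slot: nothing to copy */

	buf = vmalloc(len);			/* may be large, so not kmalloc */
	if (!buf)
		return ERR_PTR(-ENOMEM);
	if (copy_from_user(buf, base, len)) {
		vfree(buf);
		return ERR_PTR(-EFAULT);
	}
	return buf;				/* caller vfree()s when done */
}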
@@ -598,9 +511,75 @@ int nilfs_ioctl_prepare_clean_segments(struct the_nilfs *nilfs, static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp, unsigned int cmd, void __user *argp) { + struct nilfs_argv argv[5]; + const static size_t argsz[5] = { + sizeof(struct nilfs_vdesc), + sizeof(struct nilfs_period), + sizeof(__u64), + sizeof(struct nilfs_bdesc), + sizeof(__u64), + }; + void __user *base; + void *kbufs[5]; + struct the_nilfs *nilfs; + size_t len, nsegs; + int n, ret; + if (!capable(CAP_SYS_ADMIN)) return -EPERM; - return nilfs_clean_segments(inode->i_sb, argp); + + if (copy_from_user(argv, argp, sizeof(argv))) + return -EFAULT; + + nsegs = argv[4].v_nmembs; + if (argv[4].v_size != argsz[4]) + return -EINVAL; + /* + * argv[4] points to segment numbers this ioctl cleans. We + * use kmalloc() for its buffer because memory used for the + * segment numbers is enough small. + */ + kbufs[4] = memdup_user((void __user *)(unsigned long)argv[4].v_base, + nsegs * sizeof(__u64)); + if (IS_ERR(kbufs[4])) + return PTR_ERR(kbufs[4]); + + nilfs = NILFS_SB(inode->i_sb)->s_nilfs; + + for (n = 0; n < 4; n++) { + ret = -EINVAL; + if (argv[n].v_size != argsz[n]) + goto out_free; + + if (argv[n].v_nmembs > nsegs * nilfs->ns_blocks_per_segment) + goto out_free; + + len = argv[n].v_size * argv[n].v_nmembs; + base = (void __user *)(unsigned long)argv[n].v_base; + if (len == 0) { + kbufs[n] = NULL; + continue; + } + + kbufs[n] = vmalloc(len); + if (!kbufs[n]) { + ret = -ENOMEM; + goto out_free; + } + if (copy_from_user(kbufs[n], base, len)) { + ret = -EFAULT; + vfree(kbufs[n]); + goto out_free; + } + } + + ret = nilfs_clean_segments(inode->i_sb, argv, kbufs); + + out_free: + while (--n >= 0) + vfree(kbufs[n]); + kfree(kbufs[4]); + return ret; } static int nilfs_ioctl_sync(struct inode *inode, struct file *filp, @@ -621,6 +600,33 @@ static int nilfs_ioctl_sync(struct inode *inode, struct file *filp, return 0; } +static int nilfs_ioctl_get_info(struct inode *inode, struct file *filp, + unsigned int cmd, void __user *argp, + size_t membsz, + ssize_t (*dofunc)(struct the_nilfs *, + __u64 *, int, + void *, size_t, size_t)) + +{ + struct the_nilfs *nilfs = NILFS_SB(inode->i_sb)->s_nilfs; + struct nilfs_argv argv; + int ret; + + if (copy_from_user(&argv, argp, sizeof(argv))) + return -EFAULT; + + if (argv.v_size != membsz) + return -EINVAL; + + ret = nilfs_ioctl_wrap_copy(nilfs, &argv, _IOC_DIR(cmd), dofunc); + if (ret < 0) + return ret; + + if (copy_to_user(argp, &argv, sizeof(argv))) + ret = -EFAULT; + return ret; +} + long nilfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct inode *inode = filp->f_dentry->d_inode; @@ -632,16 +638,21 @@ long nilfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) case NILFS_IOCTL_DELETE_CHECKPOINT: return nilfs_ioctl_delete_checkpoint(inode, filp, cmd, argp); case NILFS_IOCTL_GET_CPINFO: - return nilfs_ioctl_get_cpinfo(inode, filp, cmd, argp); + return nilfs_ioctl_get_info(inode, filp, cmd, argp, + sizeof(struct nilfs_cpinfo), + nilfs_ioctl_do_get_cpinfo); case NILFS_IOCTL_GET_CPSTAT: return nilfs_ioctl_get_cpstat(inode, filp, cmd, argp); case NILFS_IOCTL_GET_SUINFO: - return nilfs_ioctl_get_suinfo(inode, filp, cmd, argp); + return nilfs_ioctl_get_info(inode, filp, cmd, argp, + sizeof(struct nilfs_suinfo), + nilfs_ioctl_do_get_suinfo); case NILFS_IOCTL_GET_SUSTAT: return nilfs_ioctl_get_sustat(inode, filp, cmd, argp); case NILFS_IOCTL_GET_VINFO: - /* XXX: rename to ??? 
*/ - return nilfs_ioctl_get_vinfo(inode, filp, cmd, argp); + return nilfs_ioctl_get_info(inode, filp, cmd, argp, + sizeof(struct nilfs_vinfo), + nilfs_ioctl_do_get_vinfo); case NILFS_IOCTL_GET_BDESCS: return nilfs_ioctl_get_bdescs(inode, filp, cmd, argp); case NILFS_IOCTL_CLEAN_SEGMENTS: diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c index 47dd815433f..bb78745a0e3 100644 --- a/fs/nilfs2/mdt.c +++ b/fs/nilfs2/mdt.c @@ -77,19 +77,22 @@ static int nilfs_mdt_create_block(struct inode *inode, unsigned long block, void *)) { struct the_nilfs *nilfs = NILFS_MDT(inode)->mi_nilfs; - struct nilfs_sb_info *writer = NULL; struct super_block *sb = inode->i_sb; struct nilfs_transaction_info ti; struct buffer_head *bh; int err; if (!sb) { - writer = nilfs_get_writer(nilfs); - if (!writer) { + /* + * Make sure this function is not called from any + * read-only context. + */ + if (!nilfs->ns_writer) { + WARN_ON(1); err = -EROFS; goto out; } - sb = writer->s_super; + sb = nilfs->ns_writer->s_super; } nilfs_transaction_begin(sb, &ti, 0); @@ -127,8 +130,6 @@ static int nilfs_mdt_create_block(struct inode *inode, unsigned long block, err = nilfs_transaction_commit(sb); else nilfs_transaction_abort(sb); - if (writer) - nilfs_put_writer(nilfs); out: return err; } @@ -299,7 +300,7 @@ int nilfs_mdt_delete_block(struct inode *inode, unsigned long block) int err; err = nilfs_bmap_delete(ii->i_bmap, block); - if (likely(!err)) { + if (!err || err == -ENOENT) { nilfs_mdt_mark_dirty(inode); nilfs_mdt_forget_block(inode, block); } diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h index 3d0c18a16db..da6fc0bba2e 100644 --- a/fs/nilfs2/nilfs.h +++ b/fs/nilfs2/nilfs.h @@ -236,7 +236,8 @@ extern int nilfs_sync_file(struct file *, struct dentry *, int); /* ioctl.c */ long nilfs_ioctl(struct file *, unsigned int, unsigned long); -int nilfs_ioctl_prepare_clean_segments(struct the_nilfs *, void __user *); +int nilfs_ioctl_prepare_clean_segments(struct the_nilfs *, struct nilfs_argv *, + void **); /* inode.c */ extern struct inode *nilfs_new_inode(struct inode *, int); diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c index 1bfbba9c0e9..a2692bbc7b5 100644 --- a/fs/nilfs2/page.c +++ b/fs/nilfs2/page.c @@ -128,7 +128,8 @@ void nilfs_forget_buffer(struct buffer_head *bh) lock_buffer(bh); clear_buffer_nilfs_volatile(bh); - if (test_clear_buffer_dirty(bh) && nilfs_page_buffers_clean(page)) + clear_buffer_dirty(bh); + if (nilfs_page_buffers_clean(page)) __nilfs_clear_page_dirty(page); clear_buffer_uptodate(bh); diff --git a/fs/nilfs2/recovery.c b/fs/nilfs2/recovery.c index 4fc081e47d7..57afa9d2406 100644 --- a/fs/nilfs2/recovery.c +++ b/fs/nilfs2/recovery.c @@ -407,6 +407,7 @@ void nilfs_dispose_segment_list(struct list_head *head) } static int nilfs_prepare_segment_for_recovery(struct the_nilfs *nilfs, + struct nilfs_sb_info *sbi, struct nilfs_recovery_info *ri) { struct list_head *head = &ri->ri_used_segments; @@ -421,6 +422,7 @@ static int nilfs_prepare_segment_for_recovery(struct the_nilfs *nilfs, segnum[2] = ri->ri_segnum; segnum[3] = ri->ri_nextnum; + nilfs_attach_writer(nilfs, sbi); /* * Releasing the next segment of the latest super root. * The next segment is invalidated by this recovery. 
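/*
 * [Editorial sketch, not part of the patch.]  The fs/nilfs2/mdt.c hunk
 * above makes nilfs_mdt_delete_block() treat -ENOENT from
 * nilfs_bmap_delete() the same as success: deleting a block that is
 * already absent becomes a no-op.  The general shape of that idiom, with
 * invented my_*() names standing in for the bmap/mdt helpers, is:
 */
static int my_delete_block(struct my_cache *cache, unsigned long block)
{
	int err = my_map_delete(cache, block);	/* may return -ENOENT */

	if (err && err != -ENOENT)
		return err;			/* a real failure */

	/* the block is gone either way: drop cached copies, mark dirty */
	my_forget_block(cache, block);
	my_mark_dirty(cache);
	return 0;				/* callers never see -ENOENT */
}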
@@ -459,10 +461,10 @@ static int nilfs_prepare_segment_for_recovery(struct the_nilfs *nilfs, nilfs->ns_pseg_offset = 0; nilfs->ns_seg_seq = ri->ri_seq + 2; nilfs->ns_nextnum = nilfs->ns_segnum = segnum[0]; - return 0; failed: /* No need to recover sufile because it will be destroyed on error */ + nilfs_detach_writer(nilfs, sbi); return err; } @@ -728,7 +730,7 @@ int nilfs_recover_logical_segments(struct the_nilfs *nilfs, goto failed; if (ri->ri_need_recovery == NILFS_RECOVERY_ROLLFORWARD_DONE) { - err = nilfs_prepare_segment_for_recovery(nilfs, ri); + err = nilfs_prepare_segment_for_recovery(nilfs, sbi, ri); if (unlikely(err)) { printk(KERN_ERR "NILFS: Error preparing segments for " "recovery.\n"); diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c index fb70ec3be20..22c7f65c240 100644 --- a/fs/nilfs2/segment.c +++ b/fs/nilfs2/segment.c @@ -2589,7 +2589,8 @@ nilfs_remove_written_gcinodes(struct the_nilfs *nilfs, struct list_head *head) } } -int nilfs_clean_segments(struct super_block *sb, void __user *argp) +int nilfs_clean_segments(struct super_block *sb, struct nilfs_argv *argv, + void **kbufs) { struct nilfs_sb_info *sbi = NILFS_SB(sb); struct nilfs_sc_info *sci = NILFS_SC(sbi); @@ -2606,7 +2607,7 @@ int nilfs_clean_segments(struct super_block *sb, void __user *argp) err = nilfs_init_gcdat_inode(nilfs); if (unlikely(err)) goto out_unlock; - err = nilfs_ioctl_prepare_clean_segments(nilfs, argp); + err = nilfs_ioctl_prepare_clean_segments(nilfs, argv, kbufs); if (unlikely(err)) goto out_unlock; diff --git a/fs/nilfs2/segment.h b/fs/nilfs2/segment.h index a98fc1ed0bb..476bdd5df5b 100644 --- a/fs/nilfs2/segment.h +++ b/fs/nilfs2/segment.h @@ -222,7 +222,8 @@ extern int nilfs_construct_segment(struct super_block *); extern int nilfs_construct_dsync_segment(struct super_block *, struct inode *, loff_t, loff_t); extern void nilfs_flush_segment(struct super_block *, ino_t); -extern int nilfs_clean_segments(struct super_block *, void __user *); +extern int nilfs_clean_segments(struct super_block *, struct nilfs_argv *, + void **); extern int nilfs_segctor_add_segments_to_be_freed(struct nilfs_sc_info *, __u64 *, size_t); diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c index ed0a0cfd68d..579dd1b1110 100644 --- a/fs/ocfs2/symlink.c +++ b/fs/ocfs2/symlink.c @@ -39,6 +39,7 @@ #include #include #include +#include #define MLOG_MASK_PREFIX ML_NAMEI #include @@ -54,26 +55,6 @@ #include "buffer_head_io.h" -static char *ocfs2_page_getlink(struct dentry * dentry, - struct page **ppage); -static char *ocfs2_fast_symlink_getlink(struct inode *inode, - struct buffer_head **bh); - -/* get the link contents into pagecache */ -static char *ocfs2_page_getlink(struct dentry * dentry, - struct page **ppage) -{ - struct page * page; - struct address_space *mapping = dentry->d_inode->i_mapping; - page = read_mapping_page(mapping, 0, NULL); - if (IS_ERR(page)) - goto sync_fail; - *ppage = page; - return kmap(page); - -sync_fail: - return (char*)page; -} static char *ocfs2_fast_symlink_getlink(struct inode *inode, struct buffer_head **bh) @@ -128,40 +109,55 @@ out: return ret; } -static void *ocfs2_follow_link(struct dentry *dentry, - struct nameidata *nd) +static void *ocfs2_fast_follow_link(struct dentry *dentry, + struct nameidata *nd) { - int status; - char *link; + int status = 0; + int len; + char *target, *link = ERR_PTR(-ENOMEM); struct inode *inode = dentry->d_inode; - struct page *page = NULL; struct buffer_head *bh = NULL; - - if (ocfs2_inode_is_fast_symlink(inode)) - link = 
ocfs2_fast_symlink_getlink(inode, &bh); - else - link = ocfs2_page_getlink(dentry, &page); - if (IS_ERR(link)) { - status = PTR_ERR(link); + + mlog_entry_void(); + + BUG_ON(!ocfs2_inode_is_fast_symlink(inode)); + target = ocfs2_fast_symlink_getlink(inode, &bh); + if (IS_ERR(target)) { + status = PTR_ERR(target); mlog_errno(status); goto bail; } - status = vfs_follow_link(nd, link); + /* Fast symlinks can't be large */ + len = strlen(target); + link = kzalloc(len + 1, GFP_NOFS); + if (!link) { + status = -ENOMEM; + mlog_errno(status); + goto bail; + } + + memcpy(link, target, len); + nd_set_link(nd, link); bail: - if (page) { - kunmap(page); - page_cache_release(page); - } brelse(bh); - return ERR_PTR(status); + mlog_exit(status); + return status ? ERR_PTR(status) : link; +} + +static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie) +{ + char *link = cookie; + + kfree(link); } const struct inode_operations ocfs2_symlink_inode_operations = { .readlink = page_readlink, - .follow_link = ocfs2_follow_link, + .follow_link = page_follow_link_light, + .put_link = page_put_link, .getattr = ocfs2_getattr, .setattr = ocfs2_setattr, .setxattr = generic_setxattr, @@ -171,7 +167,8 @@ const struct inode_operations ocfs2_symlink_inode_operations = { }; const struct inode_operations ocfs2_fast_symlink_inode_operations = { .readlink = ocfs2_readlink, - .follow_link = ocfs2_follow_link, + .follow_link = ocfs2_fast_follow_link, + .put_link = ocfs2_fast_put_link, .getattr = ocfs2_getattr, .setattr = ocfs2_setattr, .setxattr = generic_setxattr, diff --git a/fs/open.c b/fs/open.c index 377eb25b6ab..bdfbf03615a 100644 --- a/fs/open.c +++ b/fs/open.c @@ -1033,7 +1033,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, int mode) if (!IS_ERR(tmp)) { fd = get_unused_fd_flags(flags); if (fd >= 0) { - struct file *f = do_filp_open(dfd, tmp, flags, mode); + struct file *f = do_filp_open(dfd, tmp, flags, mode, 0); if (IS_ERR(f)) { put_unused_fd(fd); fd = PTR_ERR(f); diff --git a/fs/proc/base.c b/fs/proc/base.c index fb45615943c..3326bbf9ab9 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -1956,7 +1956,7 @@ static struct dentry *proc_pident_instantiate(struct inode *dir, const struct pid_entry *p = ptr; struct inode *inode; struct proc_inode *ei; - struct dentry *error = ERR_PTR(-EINVAL); + struct dentry *error = ERR_PTR(-ENOENT); inode = proc_pid_make_inode(dir->i_sb, task); if (!inode) diff --git a/fs/proc/root.c b/fs/proc/root.c index 1e15a2b176e..b080b791d9e 100644 --- a/fs/proc/root.c +++ b/fs/proc/root.c @@ -67,8 +67,7 @@ static int proc_get_sb(struct file_system_type *fs_type, sb->s_flags = flags; err = proc_fill_super(sb); if (err) { - up_write(&sb->s_umount); - deactivate_super(sb); + deactivate_locked_super(sb); return err; } diff --git a/fs/reiserfs/dir.c b/fs/reiserfs/dir.c index 67a80d7e59e..45ee3d357c7 100644 --- a/fs/reiserfs/dir.c +++ b/fs/reiserfs/dir.c @@ -41,6 +41,18 @@ static int reiserfs_dir_fsync(struct file *filp, struct dentry *dentry, #define store_ih(where,what) copy_item_head (where, what) +static inline bool is_privroot_deh(struct dentry *dir, + struct reiserfs_de_head *deh) +{ + int ret = 0; +#ifdef CONFIG_REISERFS_FS_XATTR + struct dentry *privroot = REISERFS_SB(dir->d_sb)->priv_root; + ret = (dir == dir->d_parent && privroot->d_inode && + deh->deh_objectid == INODE_PKEY(privroot->d_inode)->k_objectid); +#endif + return ret; +} + int reiserfs_readdir_dentry(struct dentry *dentry, void *dirent, filldir_t filldir, loff_t *pos) { @@ 
-138,18 +150,8 @@ int reiserfs_readdir_dentry(struct dentry *dentry, void *dirent, } /* Ignore the .reiserfs_priv entry */ - if (reiserfs_xattrs(inode->i_sb) && - !old_format_only(inode->i_sb) && - dentry == inode->i_sb->s_root && - REISERFS_SB(inode->i_sb)->priv_root && - REISERFS_SB(inode->i_sb)->priv_root->d_inode - && deh_objectid(deh) == - le32_to_cpu(INODE_PKEY - (REISERFS_SB(inode->i_sb)-> - priv_root->d_inode)-> - k_objectid)) { + if (is_privroot_deh(dentry, deh)) continue; - } d_off = deh_offset(deh); *pos = d_off; diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c index efd4d720718..27157912863 100644 --- a/fs/reiserfs/namei.c +++ b/fs/reiserfs/namei.c @@ -338,21 +338,8 @@ static struct dentry *reiserfs_lookup(struct inode *dir, struct dentry *dentry, &path_to_entry, &de); pathrelse(&path_to_entry); if (retval == NAME_FOUND) { - /* Hide the .reiserfs_priv directory */ - if (reiserfs_xattrs(dir->i_sb) && - !old_format_only(dir->i_sb) && - REISERFS_SB(dir->i_sb)->priv_root && - REISERFS_SB(dir->i_sb)->priv_root->d_inode && - de.de_objectid == - le32_to_cpu(INODE_PKEY - (REISERFS_SB(dir->i_sb)->priv_root->d_inode)-> - k_objectid)) { - reiserfs_write_unlock(dir->i_sb); - return ERR_PTR(-EACCES); - } - - inode = - reiserfs_iget(dir->i_sb, (struct cpu_key *)&(de.de_dir_id)); + inode = reiserfs_iget(dir->i_sb, + (struct cpu_key *)&(de.de_dir_id)); if (!inode || IS_ERR(inode)) { reiserfs_write_unlock(dir->i_sb); return ERR_PTR(-EACCES); diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c index 0ae6486d904..3567fb9e3fb 100644 --- a/fs/reiserfs/super.c +++ b/fs/reiserfs/super.c @@ -448,13 +448,11 @@ int remove_save_link(struct inode *inode, int truncate) static void reiserfs_kill_sb(struct super_block *s) { if (REISERFS_SB(s)) { -#ifdef CONFIG_REISERFS_FS_XATTR if (REISERFS_SB(s)->xattr_root) { d_invalidate(REISERFS_SB(s)->xattr_root); dput(REISERFS_SB(s)->xattr_root); REISERFS_SB(s)->xattr_root = NULL; } -#endif if (REISERFS_SB(s)->priv_root) { d_invalidate(REISERFS_SB(s)->priv_root); dput(REISERFS_SB(s)->priv_root); @@ -1316,8 +1314,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg) } out_ok: - kfree(s->s_options); - s->s_options = new_opts; + replace_mount_options(s, new_opts); return 0; out_err: @@ -1842,7 +1839,8 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent) goto error; } - if ((errval = reiserfs_xattr_init(s, s->s_flags))) { + if ((errval = reiserfs_lookup_privroot(s)) || + (errval = reiserfs_xattr_init(s, s->s_flags))) { dput(s->s_root); s->s_root = NULL; goto error; @@ -1855,7 +1853,8 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent) reiserfs_info(s, "using 3.5.x disk format\n"); } - if ((errval = reiserfs_xattr_init(s, s->s_flags))) { + if ((errval = reiserfs_lookup_privroot(s)) || + (errval = reiserfs_xattr_init(s, s->s_flags))) { dput(s->s_root); s->s_root = NULL; goto error; diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c index f83f52bae39..8e7deb0e696 100644 --- a/fs/reiserfs/xattr.c +++ b/fs/reiserfs/xattr.c @@ -113,41 +113,30 @@ static int xattr_rmdir(struct inode *dir, struct dentry *dentry) #define xattr_may_create(flags) (!flags || flags & XATTR_CREATE) -/* Returns and possibly creates the xattr dir. 
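/*
 * [Editorial sketch, not part of the patch.]  The ocfs2 hunk above moves
 * fast symlinks onto the standard ->follow_link()/->put_link() contract:
 * follow_link() publishes a NUL-terminated buffer with nd_set_link() and
 * returns it as a cookie, and put_link() frees that cookie afterwards.
 * A minimal in-memory variant of the same pattern is sketched below; the
 * myfs_* names and the 'target' field are assumptions, not ocfs2 code.
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/slab.h>
#include <linux/err.h>

struct myfs_inode_info {
	char		*target;	/* symlink body kept in core */
	struct inode	vfs_inode;
};

static inline struct myfs_inode_info *MYFS_I(struct inode *inode)
{
	return container_of(inode, struct myfs_inode_info, vfs_inode);
}

static void *myfs_follow_link(struct dentry *dentry, struct nameidata *nd)
{
	char *link = kstrdup(MYFS_I(dentry->d_inode)->target, GFP_NOFS);

	if (!link)
		return ERR_PTR(-ENOMEM);
	nd_set_link(nd, link);
	return link;			/* passed back to ->put_link() */
}

static void myfs_put_link(struct dentry *dentry, struct nameidata *nd,
			  void *cookie)
{
	kfree(cookie);			/* matches the kstrdup() above */
}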
*/ -static struct dentry *lookup_or_create_dir(struct dentry *parent, - const char *name, int flags) -{ - struct dentry *dentry; - BUG_ON(!parent); - - dentry = lookup_one_len(name, parent, strlen(name)); - if (IS_ERR(dentry)) - return dentry; - else if (!dentry->d_inode) { - int err = -ENODATA; - - if (xattr_may_create(flags)) { - mutex_lock_nested(&parent->d_inode->i_mutex, - I_MUTEX_XATTR); - err = xattr_mkdir(parent->d_inode, dentry, 0700); - mutex_unlock(&parent->d_inode->i_mutex); - } - - if (err) { - dput(dentry); - dentry = ERR_PTR(err); - } - } - - return dentry; -} - static struct dentry *open_xa_root(struct super_block *sb, int flags) { struct dentry *privroot = REISERFS_SB(sb)->priv_root; - if (!privroot) + struct dentry *xaroot; + if (!privroot->d_inode) return ERR_PTR(-ENODATA); - return lookup_or_create_dir(privroot, XAROOT_NAME, flags); + + mutex_lock_nested(&privroot->d_inode->i_mutex, I_MUTEX_XATTR); + + xaroot = dget(REISERFS_SB(sb)->xattr_root); + if (!xaroot) + xaroot = ERR_PTR(-ENODATA); + else if (!xaroot->d_inode) { + int err = -ENODATA; + if (xattr_may_create(flags)) + err = xattr_mkdir(privroot->d_inode, xaroot, 0700); + if (err) { + dput(xaroot); + xaroot = ERR_PTR(err); + } + } + + mutex_unlock(&privroot->d_inode->i_mutex); + return xaroot; } static struct dentry *open_xa_dir(const struct inode *inode, int flags) @@ -163,10 +152,22 @@ static struct dentry *open_xa_dir(const struct inode *inode, int flags) le32_to_cpu(INODE_PKEY(inode)->k_objectid), inode->i_generation); - xadir = lookup_or_create_dir(xaroot, namebuf, flags); + mutex_lock_nested(&xaroot->d_inode->i_mutex, I_MUTEX_XATTR); + + xadir = lookup_one_len(namebuf, xaroot, strlen(namebuf)); + if (!IS_ERR(xadir) && !xadir->d_inode) { + int err = -ENODATA; + if (xattr_may_create(flags)) + err = xattr_mkdir(xaroot->d_inode, xadir, 0700); + if (err) { + dput(xadir); + xadir = ERR_PTR(err); + } + } + + mutex_unlock(&xaroot->d_inode->i_mutex); dput(xaroot); return xadir; - } /* The following are side effects of other operations that aren't explicitly @@ -184,6 +185,7 @@ fill_with_dentries(void *buf, const char *name, int namelen, loff_t offset, { struct reiserfs_dentry_buf *dbuf = buf; struct dentry *dentry; + WARN_ON_ONCE(!mutex_is_locked(&dbuf->xadir->d_inode->i_mutex)); if (dbuf->count == ARRAY_SIZE(dbuf->dentries)) return -ENOSPC; @@ -349,6 +351,7 @@ static struct dentry *xattr_lookup(struct inode *inode, const char *name, if (IS_ERR(xadir)) return ERR_CAST(xadir); + mutex_lock_nested(&xadir->d_inode->i_mutex, I_MUTEX_XATTR); xafile = lookup_one_len(name, xadir, strlen(name)); if (IS_ERR(xafile)) { err = PTR_ERR(xafile); @@ -360,18 +363,15 @@ static struct dentry *xattr_lookup(struct inode *inode, const char *name, if (!xafile->d_inode) { err = -ENODATA; - if (xattr_may_create(flags)) { - mutex_lock_nested(&xadir->d_inode->i_mutex, - I_MUTEX_XATTR); + if (xattr_may_create(flags)) err = xattr_create(xadir->d_inode, xafile, 0700|S_IFREG); - mutex_unlock(&xadir->d_inode->i_mutex); - } } if (err) dput(xafile); out: + mutex_unlock(&xadir->d_inode->i_mutex); dput(xadir); if (err) return ERR_PTR(err); @@ -435,6 +435,7 @@ static int lookup_and_delete_xattr(struct inode *inode, const char *name) if (IS_ERR(xadir)) return PTR_ERR(xadir); + mutex_lock_nested(&xadir->d_inode->i_mutex, I_MUTEX_XATTR); dentry = lookup_one_len(name, xadir, strlen(name)); if (IS_ERR(dentry)) { err = PTR_ERR(dentry); @@ -442,14 +443,13 @@ static int lookup_and_delete_xattr(struct inode *inode, const char *name) } if (dentry->d_inode) { - 
mutex_lock_nested(&xadir->d_inode->i_mutex, I_MUTEX_XATTR); err = xattr_unlink(xadir->d_inode, dentry); - mutex_unlock(&xadir->d_inode->i_mutex); update_ctime(inode); } dput(dentry); out_dput: + mutex_unlock(&xadir->d_inode->i_mutex); dput(xadir); return err; } @@ -687,20 +687,6 @@ out: return err; } -/* Actual operations that are exported to VFS-land */ -struct xattr_handler *reiserfs_xattr_handlers[] = { - &reiserfs_xattr_user_handler, - &reiserfs_xattr_trusted_handler, -#ifdef CONFIG_REISERFS_FS_SECURITY - &reiserfs_xattr_security_handler, -#endif -#ifdef CONFIG_REISERFS_FS_POSIX_ACL - &reiserfs_posix_acl_access_handler, - &reiserfs_posix_acl_default_handler, -#endif - NULL -}; - /* * In order to implement different sets of xattr operations for each xattr * prefix with the generic xattr API, a filesystem should create a @@ -843,7 +829,7 @@ ssize_t reiserfs_listxattr(struct dentry * dentry, char *buffer, size_t size) if (!dentry->d_inode) return -EINVAL; - if (!reiserfs_xattrs(dentry->d_sb) || + if (!dentry->d_sb->s_xattr || get_inode_sd_version(dentry->d_inode) == STAT_DATA_V1) return -EOPNOTSUPP; @@ -885,42 +871,50 @@ static int reiserfs_check_acl(struct inode *inode, int mask) return error; } -int reiserfs_permission(struct inode *inode, int mask) -{ - /* - * We don't do permission checks on the internal objects. - * Permissions are determined by the "owning" object. - */ - if (IS_PRIVATE(inode)) - return 0; - /* - * Stat data v1 doesn't support ACLs. - */ - if (get_inode_sd_version(inode) == STAT_DATA_V1) - return generic_permission(inode, mask, NULL); - else - return generic_permission(inode, mask, reiserfs_check_acl); -} - static int create_privroot(struct dentry *dentry) { int err; struct inode *inode = dentry->d_parent->d_inode; - mutex_lock_nested(&inode->i_mutex, I_MUTEX_XATTR); + WARN_ON_ONCE(!mutex_is_locked(&inode->i_mutex)); + err = xattr_mkdir(inode, dentry, 0700); - mutex_unlock(&inode->i_mutex); - if (err) { - dput(dentry); - dentry = NULL; + if (err || !dentry->d_inode) { + reiserfs_warning(dentry->d_sb, "jdm-20006", + "xattrs/ACLs enabled and couldn't " + "find/create .reiserfs_priv. 
" + "Failing mount."); + return -EOPNOTSUPP; } - if (dentry && dentry->d_inode) - reiserfs_info(dentry->d_sb, "Created %s - reserved for xattr " - "storage.\n", PRIVROOT_NAME); + dentry->d_inode->i_flags |= S_PRIVATE; + reiserfs_info(dentry->d_sb, "Created %s - reserved for xattr " + "storage.\n", PRIVROOT_NAME); - return err; + return 0; } +#else +int __init reiserfs_xattr_register_handlers(void) { return 0; } +void reiserfs_xattr_unregister_handlers(void) {} +static int create_privroot(struct dentry *dentry) { return 0; } +#endif + +/* Actual operations that are exported to VFS-land */ +struct xattr_handler *reiserfs_xattr_handlers[] = { +#ifdef CONFIG_REISERFS_FS_XATTR + &reiserfs_xattr_user_handler, + &reiserfs_xattr_trusted_handler, +#endif +#ifdef CONFIG_REISERFS_FS_SECURITY + &reiserfs_xattr_security_handler, +#endif +#ifdef CONFIG_REISERFS_FS_POSIX_ACL + &reiserfs_posix_acl_access_handler, + &reiserfs_posix_acl_default_handler, +#endif + NULL +}; + static int xattr_mount_check(struct super_block *s) { /* We need generation numbers to ensure that the oid mapping is correct @@ -940,21 +934,33 @@ static int xattr_mount_check(struct super_block *s) return 0; } -#else -int __init reiserfs_xattr_register_handlers(void) { return 0; } -void reiserfs_xattr_unregister_handlers(void) {} +int reiserfs_permission(struct inode *inode, int mask) +{ + /* + * We don't do permission checks on the internal objects. + * Permissions are determined by the "owning" object. + */ + if (IS_PRIVATE(inode)) + return 0; + +#ifdef CONFIG_REISERFS_FS_XATTR + /* + * Stat data v1 doesn't support ACLs. + */ + if (get_inode_sd_version(inode) != STAT_DATA_V1) + return generic_permission(inode, mask, reiserfs_check_acl); #endif + return generic_permission(inode, mask, NULL); +} /* This will catch lookups from the fs root to .reiserfs_priv */ static int xattr_lookup_poison(struct dentry *dentry, struct qstr *q1, struct qstr *name) { struct dentry *priv_root = REISERFS_SB(dentry->d_sb)->priv_root; - if (name->len == priv_root->d_name.len && - name->hash == priv_root->d_name.hash && - !memcmp(name->name, priv_root->d_name.name, name->len)) { + if (container_of(q1, struct dentry, d_name) == priv_root) return -ENOENT; - } else if (q1->len == name->len && + if (q1->len == name->len && !memcmp(q1->name, name->name, name->len)) return 0; return 1; @@ -964,73 +970,71 @@ static const struct dentry_operations xattr_lookup_poison_ops = { .d_compare = xattr_lookup_poison, }; +int reiserfs_lookup_privroot(struct super_block *s) +{ + struct dentry *dentry; + int err = 0; + + /* If we don't have the privroot located yet - go find it */ + mutex_lock(&s->s_root->d_inode->i_mutex); + dentry = lookup_one_len(PRIVROOT_NAME, s->s_root, + strlen(PRIVROOT_NAME)); + if (!IS_ERR(dentry)) { + REISERFS_SB(s)->priv_root = dentry; + s->s_root->d_op = &xattr_lookup_poison_ops; + if (dentry->d_inode) + dentry->d_inode->i_flags |= S_PRIVATE; + } else + err = PTR_ERR(dentry); + mutex_unlock(&s->s_root->d_inode->i_mutex); + + return err; +} + /* We need to take a copy of the mount flags since things like * MS_RDONLY don't get set until *after* we're called. 
* mount_flags != mount_options */ int reiserfs_xattr_init(struct super_block *s, int mount_flags) { int err = 0; + struct dentry *privroot = REISERFS_SB(s)->priv_root; -#ifdef CONFIG_REISERFS_FS_XATTR err = xattr_mount_check(s); if (err) goto error; -#endif - /* If we don't have the privroot located yet - go find it */ - if (!REISERFS_SB(s)->priv_root) { - struct dentry *dentry; - dentry = lookup_one_len(PRIVROOT_NAME, s->s_root, - strlen(PRIVROOT_NAME)); - if (!IS_ERR(dentry)) { -#ifdef CONFIG_REISERFS_FS_XATTR - if (!(mount_flags & MS_RDONLY) && !dentry->d_inode) - err = create_privroot(dentry); -#endif - if (!dentry->d_inode) { - dput(dentry); - dentry = NULL; - } - } else - err = PTR_ERR(dentry); - - if (!err && dentry) { - s->s_root->d_op = &xattr_lookup_poison_ops; - dentry->d_inode->i_flags |= S_PRIVATE; - REISERFS_SB(s)->priv_root = dentry; -#ifdef CONFIG_REISERFS_FS_XATTR - /* xattrs are unavailable */ - } else if (!(mount_flags & MS_RDONLY)) { - /* If we're read-only it just means that the dir - * hasn't been created. Not an error -- just no - * xattrs on the fs. We'll check again if we - * go read-write */ - reiserfs_warning(s, "jdm-20006", - "xattrs/ACLs enabled and couldn't " - "find/create .reiserfs_priv. " - "Failing mount."); - err = -EOPNOTSUPP; -#endif - } + if (!privroot->d_inode && !(mount_flags & MS_RDONLY)) { + mutex_lock(&s->s_root->d_inode->i_mutex); + err = create_privroot(REISERFS_SB(s)->priv_root); + mutex_unlock(&s->s_root->d_inode->i_mutex); } -#ifdef CONFIG_REISERFS_FS_XATTR - if (!err) + if (privroot->d_inode) { s->s_xattr = reiserfs_xattr_handlers; + mutex_lock(&privroot->d_inode->i_mutex); + if (!REISERFS_SB(s)->xattr_root) { + struct dentry *dentry; + dentry = lookup_one_len(XAROOT_NAME, privroot, + strlen(XAROOT_NAME)); + if (!IS_ERR(dentry)) + REISERFS_SB(s)->xattr_root = dentry; + else + err = PTR_ERR(dentry); + } + mutex_unlock(&privroot->d_inode->i_mutex); + } error: if (err) { clear_bit(REISERFS_XATTRS_USER, &(REISERFS_SB(s)->s_mount_opt)); clear_bit(REISERFS_POSIXACL, &(REISERFS_SB(s)->s_mount_opt)); } -#endif /* The super_block MS_POSIXACL must mirror the (no)acl mount option. 
*/ - s->s_flags = s->s_flags & ~MS_POSIXACL; -#ifdef CONFIG_REISERFS_FS_POSIX_ACL if (reiserfs_posixacl(s)) s->s_flags |= MS_POSIXACL; -#endif + else + s->s_flags &= ~MS_POSIXACL; return err; } diff --git a/fs/reiserfs/xattr_security.c b/fs/reiserfs/xattr_security.c index 4d3c20e787c..a92c8792c0f 100644 --- a/fs/reiserfs/xattr_security.c +++ b/fs/reiserfs/xattr_security.c @@ -55,8 +55,16 @@ int reiserfs_security_init(struct inode *dir, struct inode *inode, struct reiserfs_security_handle *sec) { int blocks = 0; - int error = security_inode_init_security(inode, dir, &sec->name, - &sec->value, &sec->length); + int error; + + sec->name = NULL; + + /* Don't add selinux attributes on xattrs - they'll never get used */ + if (IS_PRIVATE(dir)) + return 0; + + error = security_inode_init_security(inode, dir, &sec->name, + &sec->value, &sec->length); if (error) { if (error == -EOPNOTSUPP) error = 0; diff --git a/fs/romfs/super.c b/fs/romfs/super.c index c53b5ef8a02..4ab3c03d8f9 100644 --- a/fs/romfs/super.c +++ b/fs/romfs/super.c @@ -298,7 +298,8 @@ static struct inode *romfs_iget(struct super_block *sb, unsigned long pos) struct romfs_inode ri; struct inode *i; unsigned long nlen; - unsigned nextfh, ret; + unsigned nextfh; + int ret; umode_t mode; /* we might have to traverse a chain of "hard link" file entries to get diff --git a/fs/squashfs/Makefile b/fs/squashfs/Makefile index 8258cf9a031..70e3244fa30 100644 --- a/fs/squashfs/Makefile +++ b/fs/squashfs/Makefile @@ -5,4 +5,3 @@ obj-$(CONFIG_SQUASHFS) += squashfs.o squashfs-y += block.o cache.o dir.o export.o file.o fragment.o id.o inode.o squashfs-y += namei.o super.o symlink.o -#squashfs-y += squashfs2_0.o diff --git a/fs/squashfs/cache.c b/fs/squashfs/cache.c index 1c4739e33af..40c98fa6b5d 100644 --- a/fs/squashfs/cache.c +++ b/fs/squashfs/cache.c @@ -252,6 +252,7 @@ struct squashfs_cache *squashfs_cache_init(char *name, int entries, cache->entries = entries; cache->block_size = block_size; cache->pages = block_size >> PAGE_CACHE_SHIFT; + cache->pages = cache->pages ? cache->pages : 1; cache->name = name; cache->num_waiters = 0; spin_lock_init(&cache->lock); diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c index ffa6edcd2d0..0adc624c956 100644 --- a/fs/squashfs/super.c +++ b/fs/squashfs/super.c @@ -157,6 +157,16 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent) if (msblk->block_size > SQUASHFS_FILE_MAX_SIZE) goto failed_mount; + /* + * Check the system page size is not larger than the filesystem + * block size (by default 128K). This is currently not supported. + */ + if (PAGE_CACHE_SIZE > msblk->block_size) { + ERROR("Page size > filesystem block size (%d). This is " + "currently not supported!\n", msblk->block_size); + goto failed_mount; + } + msblk->block_log = le16_to_cpu(sblk->block_log); if (msblk->block_log > SQUASHFS_FILE_MAX_LOG) goto failed_mount; diff --git a/fs/super.c b/fs/super.c index 786fe7d7279..1943fdf655f 100644 --- a/fs/super.c +++ b/fs/super.c @@ -207,6 +207,34 @@ void deactivate_super(struct super_block *s) EXPORT_SYMBOL(deactivate_super); +/** + * deactivate_locked_super - drop an active reference to superblock + * @s: superblock to deactivate + * + * Equivalent of up_write(&s->s_umount); deactivate_super(s);, except that + * it does not unlock it until it's all over. As the result, it's safe to + * use to dispose of new superblock on ->get_sb() failure exits - nobody + * will see the sucker until it's all over. 
Equivalent using up_write + + * deactivate_super is safe for that purpose only if superblock is either + * safe to use or has NULL ->s_root when we unlock. + */ +void deactivate_locked_super(struct super_block *s) +{ + struct file_system_type *fs = s->s_type; + if (atomic_dec_and_lock(&s->s_active, &sb_lock)) { + s->s_count -= S_BIAS-1; + spin_unlock(&sb_lock); + vfs_dq_off(s, 0); + fs->kill_sb(s); + put_filesystem(fs); + put_super(s); + } else { + up_write(&s->s_umount); + } +} + +EXPORT_SYMBOL(deactivate_locked_super); + /** * grab_super - acquire an active reference * @s: reference we are trying to make active @@ -797,8 +825,7 @@ int get_sb_ns(struct file_system_type *fs_type, int flags, void *data, sb->s_flags = flags; err = fill_super(sb, data, flags & MS_SILENT ? 1 : 0); if (err) { - up_write(&sb->s_umount); - deactivate_super(sb); + deactivate_locked_super(sb); return err; } @@ -854,8 +881,7 @@ int get_sb_bdev(struct file_system_type *fs_type, if (s->s_root) { if ((flags ^ s->s_flags) & MS_RDONLY) { - up_write(&s->s_umount); - deactivate_super(s); + deactivate_locked_super(s); error = -EBUSY; goto error_bdev; } @@ -870,8 +896,7 @@ int get_sb_bdev(struct file_system_type *fs_type, sb_set_blocksize(s, block_size(bdev)); error = fill_super(s, data, flags & MS_SILENT ? 1 : 0); if (error) { - up_write(&s->s_umount); - deactivate_super(s); + deactivate_locked_super(s); goto error; } @@ -897,7 +922,7 @@ void kill_block_super(struct super_block *sb) struct block_device *bdev = sb->s_bdev; fmode_t mode = sb->s_mode; - bdev->bd_super = 0; + bdev->bd_super = NULL; generic_shutdown_super(sb); sync_blockdev(bdev); close_bdev_exclusive(bdev, mode); @@ -921,8 +946,7 @@ int get_sb_nodev(struct file_system_type *fs_type, error = fill_super(s, data, flags & MS_SILENT ? 1 : 0); if (error) { - up_write(&s->s_umount); - deactivate_super(s); + deactivate_locked_super(s); return error; } s->s_flags |= MS_ACTIVE; @@ -952,8 +976,7 @@ int get_sb_single(struct file_system_type *fs_type, s->s_flags = flags; error = fill_super(s, data, flags & MS_SILENT ? 
1 : 0); if (error) { - up_write(&s->s_umount); - deactivate_super(s); + deactivate_locked_super(s); return error; } s->s_flags |= MS_ACTIVE; @@ -1006,8 +1029,7 @@ vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void return mnt; out_sb: dput(mnt->mnt_root); - up_write(&mnt->mnt_sb->s_umount); - deactivate_super(mnt->mnt_sb); + deactivate_locked_super(mnt->mnt_sb); out_free_secdata: free_secdata(secdata); out_mnt: diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c index b1606e07b7a..561a9c050ce 100644 --- a/fs/sysfs/file.c +++ b/fs/sysfs/file.c @@ -723,7 +723,7 @@ int sysfs_schedule_callback(struct kobject *kobj, void (*func)(void *), mutex_unlock(&sysfs_workq_mutex); if (sysfs_workqueue == NULL) { - sysfs_workqueue = create_workqueue("sysfsd"); + sysfs_workqueue = create_singlethread_workqueue("sysfsd"); if (sysfs_workqueue == NULL) { module_put(owner); return -ENOMEM; diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index faa44f90608..e9f7a754c4f 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c @@ -2055,8 +2055,7 @@ static int ubifs_get_sb(struct file_system_type *fs_type, int flags, return 0; out_deact: - up_write(&sb->s_umount); - deactivate_super(sb); + deactivate_locked_super(sb); out_close: ubi_close_volume(ubi); return err; diff --git a/fs/ufs/dir.c b/fs/ufs/dir.c index dbbbc466876..6321b797061 100644 --- a/fs/ufs/dir.c +++ b/fs/ufs/dir.c @@ -666,6 +666,6 @@ not_empty: const struct file_operations ufs_dir_operations = { .read = generic_read_dir, .readdir = ufs_readdir, - .fsync = file_fsync, + .fsync = ufs_sync_file, .llseek = generic_file_llseek, }; diff --git a/fs/ufs/file.c b/fs/ufs/file.c index 625ef17c6f8..2bd3a161571 100644 --- a/fs/ufs/file.c +++ b/fs/ufs/file.c @@ -30,7 +30,7 @@ #include "ufs.h" -static int ufs_sync_file(struct file *file, struct dentry *dentry, int datasync) +int ufs_sync_file(struct file *file, struct dentry *dentry, int datasync) { struct inode *inode = dentry->d_inode; int err; diff --git a/fs/ufs/ufs.h b/fs/ufs/ufs.h index 69b3427d788..d0c4acd4f1f 100644 --- a/fs/ufs/ufs.h +++ b/fs/ufs/ufs.h @@ -98,8 +98,8 @@ extern void ufs_set_link(struct inode *dir, struct ufs_dir_entry *de, /* file.c */ extern const struct inode_operations ufs_file_inode_operations; extern const struct file_operations ufs_file_operations; - extern const struct address_space_operations ufs_aops; +extern int ufs_sync_file(struct file *, struct dentry *, int); /* ialloc.c */ extern void ufs_free_inode (struct inode *inode); diff --git a/fs/xfs/linux-2.6/kmem.h b/fs/xfs/linux-2.6/kmem.h index af6843c7ee4..179cbd630f6 100644 --- a/fs/xfs/linux-2.6/kmem.h +++ b/fs/xfs/linux-2.6/kmem.h @@ -103,7 +103,7 @@ extern void *kmem_zone_zalloc(kmem_zone_t *, unsigned int __nocast); static inline int kmem_shake_allow(gfp_t gfp_mask) { - return (gfp_mask & __GFP_WAIT) != 0; + return ((gfp_mask & __GFP_WAIT) && (gfp_mask & __GFP_FS)); } #endif /* __XFS_SUPPORT_KMEM_H__ */ diff --git a/fs/xfs/xfs_dfrag.c b/fs/xfs/xfs_dfrag.c index e6d839bddbf..7465f9ee125 100644 --- a/fs/xfs/xfs_dfrag.c +++ b/fs/xfs/xfs_dfrag.c @@ -347,13 +347,15 @@ xfs_swap_extents( error = xfs_trans_commit(tp, XFS_TRANS_SWAPEXT); -out_unlock: - xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); - xfs_iunlock(tip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); out: kmem_free(tempifp); return error; +out_unlock: + xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); + xfs_iunlock(tip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); + goto out; + out_trans_cancel: xfs_trans_cancel(tp, 0); goto out_unlock; diff --git a/fs/xfs/xfs_fsops.c 
b/fs/xfs/xfs_fsops.c index 8379e3bca26..cbd451bb484 100644 --- a/fs/xfs/xfs_fsops.c +++ b/fs/xfs/xfs_fsops.c @@ -160,7 +160,7 @@ xfs_growfs_data_private( nagcount = new + (nb_mod != 0); if (nb_mod && nb_mod < XFS_MIN_AG_BLOCKS) { nagcount--; - nb = nagcount * mp->m_sb.sb_agblocks; + nb = (xfs_rfsblock_t)nagcount * mp->m_sb.sb_agblocks; if (nb < mp->m_sb.sb_dblocks) return XFS_ERROR(EINVAL); } diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h index dbd6150763e..fc218444e31 100644 --- a/include/asm-generic/local.h +++ b/include/asm-generic/local.h @@ -42,7 +42,7 @@ typedef struct #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n)) #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n)) -#define local_add_unless(l, a, u) atomic_long_add_unless((&(l)->a), (a), (u)) +#define local_add_unless(l, _a, u) atomic_long_add_unless((&(l)->a), (_a), (u)) #define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a) /* Non-atomic variants, ie. preemption disabled and won't be touched diff --git a/include/drm/drmP.h b/include/drm/drmP.h index c8c42215143..b84d8ae35e6 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -1519,6 +1519,30 @@ static __inline__ void *drm_calloc(size_t nmemb, size_t size, int area) { return kcalloc(nmemb, size, GFP_KERNEL); } + +static __inline__ void *drm_calloc_large(size_t nmemb, size_t size) +{ + u8 *addr; + + if (size <= PAGE_SIZE) + return kcalloc(nmemb, size, GFP_KERNEL); + + addr = vmalloc(nmemb * size); + if (!addr) + return NULL; + + memset(addr, 0, nmemb * size); + + return addr; +} + +static __inline void drm_free_large(void *ptr) +{ + if (!is_vmalloc_addr(ptr)) + return kfree(ptr); + + vfree(ptr); +} #else extern void *drm_alloc(size_t size, int area); extern void drm_free(void *pt, size_t size, int area); diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index 3c1924c010e..7300fb86676 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -471,6 +471,9 @@ struct drm_connector { u32 property_ids[DRM_CONNECTOR_MAX_PROPERTY]; uint64_t property_values[DRM_CONNECTOR_MAX_PROPERTY]; + /* requested DPMS state */ + int dpms; + void *helper_private; uint32_t encoder_ids[DRM_CONNECTOR_MAX_ENCODER]; diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h index ec073d8288d..6769ff6c1bc 100644 --- a/include/drm/drm_crtc_helper.h +++ b/include/drm/drm_crtc_helper.h @@ -99,6 +99,8 @@ extern bool drm_crtc_helper_set_mode(struct drm_crtc *crtc, struct drm_framebuffer *old_fb); extern bool drm_helper_crtc_in_use(struct drm_crtc *crtc); +extern void drm_helper_connector_dpms(struct drm_connector *connector, int mode); + extern int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb, struct drm_mode_fb_cmd *mode_cmd); diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h index 95962fa8398..8e1e92583fb 100644 --- a/include/drm/i915_drm.h +++ b/include/drm/i915_drm.h @@ -184,6 +184,7 @@ typedef struct _drm_i915_sarea { #define DRM_I915_GEM_GET_TILING 0x22 #define DRM_I915_GEM_GET_APERTURE 0x23 #define DRM_I915_GEM_MMAP_GTT 0x24 +#define DRM_I915_GET_PIPE_FROM_CRTC_ID 0x25 #define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t) #define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH) @@ -219,6 +220,7 @@ typedef struct _drm_i915_sarea { #define DRM_IOCTL_I915_GEM_SET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling) #define DRM_IOCTL_I915_GEM_GET_TILING DRM_IOWR (DRM_COMMAND_BASE + 
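The drm_calloc_large()/drm_free_large() pair introduced in the drmP.h hunk above picks the allocator by request size: kcalloc() when the whole request fits in a page, otherwise vmalloc() followed by an explicit memset(), with the free side dispatching on is_vmalloc_addr(). A kernel-context caller sketch, where `struct entry` and setup_table() are illustrative and not part of the patch:

    /* Illustrative only: `struct entry` and this caller are not in the patch. */
    static int setup_table(size_t count)
    {
            struct entry *table;

            table = drm_calloc_large(count, sizeof(*table));
            if (table == NULL)
                    return -ENOMEM;

            /* ... fill in and use the zeroed table ... */

            drm_free_large(table);  /* handles both the kcalloc and the vmalloc case */
            return 0;
    }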
DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling) #define DRM_IOCTL_I915_GEM_GET_APERTURE DRM_IOR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture) +#define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_intel_get_pipe_from_crtc_id) /* Allow drivers to submit batchbuffers directly to hardware, relying * on the security mechanisms provided by hardware. @@ -657,4 +659,12 @@ struct drm_i915_gem_get_aperture { __u64 aper_available_size; }; +struct drm_i915_get_pipe_from_crtc_id { + /** ID of CRTC being requested **/ + __u32 crtc_id; + + /** pipe of requested CRTC **/ + __u32 pipe; +}; + #endif /* _I915_DRM_H_ */ diff --git a/include/linux/Kbuild b/include/linux/Kbuild index ca9b9b9bd33..3f0eaa397ef 100644 --- a/include/linux/Kbuild +++ b/include/linux/Kbuild @@ -138,6 +138,7 @@ header-y += qnxtypes.h header-y += radeonfb.h header-y += raw.h header-y += resource.h +header-y += romfs_fs.h header-y += rose.h header-y += serial_reg.h header-y += smbno.h @@ -314,7 +315,6 @@ unifdef-y += irqnr.h unifdef-y += reboot.h unifdef-y += reiserfs_fs.h unifdef-y += reiserfs_xattr.h -unifdef-y += romfs_fs.h unifdef-y += route.h unifdef-y += rtc.h unifdef-y += rtnetlink.h diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h index 51e6e54b2aa..9b93cafa82a 100644 --- a/include/linux/amba/bus.h +++ b/include/linux/amba/bus.h @@ -28,7 +28,7 @@ struct amba_id { struct amba_driver { struct device_driver drv; - int (*probe)(struct amba_device *, void *); + int (*probe)(struct amba_device *, struct amba_id *); int (*remove)(struct amba_device *); void (*shutdown)(struct amba_device *); int (*suspend)(struct amba_device *, pm_message_t); diff --git a/include/linux/amba/serial.h b/include/linux/amba/serial.h index 48ee32a18ac..64a982ea5d5 100644 --- a/include/linux/amba/serial.h +++ b/include/linux/amba/serial.h @@ -159,6 +159,7 @@ #define UART01x_FR_MODEM_ANY (UART01x_FR_DCD|UART01x_FR_DSR|UART01x_FR_CTS) #ifndef __ASSEMBLY__ +struct amba_device; /* in uncompress this is included but amba/bus.h is not */ struct amba_pl010_data { void (*set_mctrl)(struct amba_device *dev, void __iomem *base, unsigned int mctrl); }; diff --git a/include/linux/ata.h b/include/linux/ata.h index cb79b7a208e..915da43edee 100644 --- a/include/linux/ata.h +++ b/include/linux/ata.h @@ -730,6 +730,34 @@ static inline int ata_id_has_unload(const u16 *id) return 0; } +static inline int ata_id_form_factor(const u16 *id) +{ + u16 val = id[168]; + + if (ata_id_major_version(id) < 7 || val == 0 || val == 0xffff) + return 0; + + val &= 0xf; + + if (val > 5) + return 0; + + return val; +} + +static inline int ata_id_rotation_rate(const u16 *id) +{ + u16 val = id[217]; + + if (ata_id_major_version(id) < 7 || val == 0 || val == 0xffff) + return 0; + + if (val > 1 && val < 0x401) + return 0; + + return val; +} + static inline int ata_id_has_trim(const u16 *id) { if (ata_id_major_version(id) >= 7 && diff --git a/include/linux/auto_fs.h b/include/linux/auto_fs.h index 63265852b7d..7b09c8348fd 100644 --- a/include/linux/auto_fs.h +++ b/include/linux/auto_fs.h @@ -14,13 +14,12 @@ #ifndef _LINUX_AUTO_FS_H #define _LINUX_AUTO_FS_H +#include #ifdef __KERNEL__ #include #include -#include #include #else -#include #include #endif /* __KERNEL__ */ diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index 9f315382610..c5ac87ca7bc 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -1022,6 +1022,8 @@ typedef struct cpumask 
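The new ata_id_form_factor() and ata_id_rotation_rate() helpers in the <linux/ata.h> hunk above decode IDENTIFY DEVICE words 168 and 217, returning 0 whenever the value is absent or falls in a reserved range. A standalone sketch of the word-217 decode (the ATA major-version check from the kernel helper is omitted; the function and sample values are illustrative):

    #include <stdio.h>
    #include <stdint.h>

    /* Word 217: 0x0000 not reported, 0x0001 non-rotating (SSD),
     * 0x0002..0x0400 reserved, 0x0401..0xFFFE nominal RPM, 0xFFFF reserved. */
    static unsigned int rotation_rate(uint16_t w217)
    {
            if (w217 == 0 || w217 == 0xffff)
                    return 0;               /* not reported */
            if (w217 > 1 && w217 < 0x401)
                    return 0;               /* reserved range */
            return w217;                    /* 1 = solid state, otherwise RPM */
    }

    int main(void)
    {
            printf("%u\n", rotation_rate(0x0001));  /* 1: non-rotating */
            printf("%u\n", rotation_rate(7200));    /* 7200 rpm */
            return 0;
    }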
*cpumask_var_t; bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node); bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags); +bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node); +bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags); void alloc_bootmem_cpumask_var(cpumask_var_t *mask); void free_cpumask_var(cpumask_var_t mask); void free_bootmem_cpumask_var(cpumask_var_t mask); @@ -1040,6 +1042,19 @@ static inline bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, return true; } +static inline bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags) +{ + cpumask_clear(*mask); + return true; +} + +static inline bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, + int node) +{ + cpumask_clear(*mask); + return true; +} + static inline void alloc_bootmem_cpumask_var(cpumask_var_t *mask) { } diff --git a/include/linux/cred.h b/include/linux/cred.h index 3282ee4318e..4fa99969631 100644 --- a/include/linux/cred.h +++ b/include/linux/cred.h @@ -13,6 +13,7 @@ #define _LINUX_CRED_H #include +#include #include #include diff --git a/include/linux/device.h b/include/linux/device.h index 6a69caaac18..5d5c197bad4 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -384,13 +384,8 @@ struct device { struct device_driver *driver; /* which driver has allocated this device */ void *driver_data; /* data private to the driver */ - - void *platform_data; /* We will remove platform_data - field if all platform devices - pass its platform specific data - from platform_device->platform_data, - other kind of devices should not - use platform_data. */ + void *platform_data; /* Platform specific data, device + core doesn't touch it */ struct dev_pm_info power; #ifdef CONFIG_NUMA diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 2e2aa3df170..ffefba81c81 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -78,12 +78,18 @@ enum dma_transaction_type { * dependency chains * @DMA_COMPL_SKIP_SRC_UNMAP - set to disable dma-unmapping the source buffer(s) * @DMA_COMPL_SKIP_DEST_UNMAP - set to disable dma-unmapping the destination(s) + * @DMA_COMPL_SRC_UNMAP_SINGLE - set to do the source dma-unmapping as single + * (if not set, do the source dma-unmapping as page) + * @DMA_COMPL_DEST_UNMAP_SINGLE - set to do the destination dma-unmapping as single + * (if not set, do the destination dma-unmapping as page) */ enum dma_ctrl_flags { DMA_PREP_INTERRUPT = (1 << 0), DMA_CTRL_ACK = (1 << 1), DMA_COMPL_SKIP_SRC_UNMAP = (1 << 2), DMA_COMPL_SKIP_DEST_UNMAP = (1 << 3), + DMA_COMPL_SRC_UNMAP_SINGLE = (1 << 4), + DMA_COMPL_DEST_UNMAP_SINGLE = (1 << 5), }; /** diff --git a/include/linux/fs.h b/include/linux/fs.h index 5bed436f435..3b534e527e0 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1775,6 +1775,7 @@ void kill_block_super(struct super_block *sb); void kill_anon_super(struct super_block *sb); void kill_litter_super(struct super_block *sb); void deactivate_super(struct super_block *sb); +void deactivate_locked_super(struct super_block *sb); int set_anon_super(struct super_block *s, void *data); struct super_block *sget(struct file_system_type *type, int (*test)(struct super_block *,void *), @@ -2117,7 +2118,7 @@ extern struct file *create_write_pipe(int flags); extern void free_write_pipe(struct file *); extern struct file *do_filp_open(int dfd, const char *pathname, - int open_flag, int mode); + int open_flag, int mode, int acc_mode); extern int may_open(struct path *, int, int); extern int 
kernel_read(struct file *, unsigned long, char *, unsigned long); @@ -2367,6 +2368,7 @@ extern void file_update_time(struct file *file); extern int generic_show_options(struct seq_file *m, struct vfsmount *mnt); extern void save_mount_options(struct super_block *sb, char *options); +extern void replace_mount_options(struct super_block *sb, char *options); static inline ino_t parent_ino(struct dentry *dentry) { diff --git a/include/linux/i7300_idle.h b/include/linux/i7300_idle.h index 05a80c44513..1587b7dec50 100644 --- a/include/linux/i7300_idle.h +++ b/include/linux/i7300_idle.h @@ -16,35 +16,33 @@ struct fbd_ioat { unsigned int vendor; unsigned int ioat_dev; + unsigned int enabled; }; /* * The i5000 chip-set has the same hooks as the i7300 - * but support is disabled by default because this driver - * has not been validated on that platform. + * but it is not enabled by default and must be manually + * manually enabled with "forceload=1" because it is + * only lightly validated. */ -#define SUPPORT_I5000 0 static const struct fbd_ioat fbd_ioat_list[] = { - {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_CNB}, -#if SUPPORT_I5000 - {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT}, -#endif + {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_CNB, 1}, + {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT, 0}, {0, 0} }; /* table of devices that work with this driver */ static const struct pci_device_id pci_tbl[] = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_FBD_CNB) }, -#if SUPPORT_I5000 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5000_ERR) }, -#endif { } /* Terminating entry */ }; /* Check for known platforms with I/O-AT */ static inline int i7300_idle_platform_probe(struct pci_dev **fbd_dev, - struct pci_dev **ioat_dev) + struct pci_dev **ioat_dev, + int enable_all) { int i; struct pci_dev *memdev, *dmadev; @@ -69,6 +67,8 @@ static inline int i7300_idle_platform_probe(struct pci_dev **fbd_dev, for (i = 0; fbd_ioat_list[i].vendor != 0; i++) { if (dmadev->vendor == fbd_ioat_list[i].vendor && dmadev->device == fbd_ioat_list[i].ioat_dev) { + if (!(fbd_ioat_list[i].enabled || enable_all)) + continue; if (fbd_dev) *fbd_dev = memdev; if (ioat_dev) diff --git a/include/linux/ide.h b/include/linux/ide.h index ff65fffb078..9fed365a598 100644 --- a/include/linux/ide.h +++ b/include/linux/ide.h @@ -1109,7 +1109,7 @@ void ide_fix_driveid(u16 *); extern void ide_fixstring(u8 *, const int, const int); -int ide_busy_sleep(ide_hwif_t *, unsigned long, int); +int ide_busy_sleep(ide_drive_t *, unsigned long, int); int ide_wait_stat(ide_startstop_t *, ide_drive_t *, u8, u8, unsigned long); diff --git a/include/linux/input.h b/include/linux/input.h index 0e6ff5de358..6fed4f6a9c9 100644 --- a/include/linux/input.h +++ b/include/linux/input.h @@ -656,6 +656,7 @@ struct input_absinfo { #define ABS_MT_POSITION_Y 0x36 /* Center Y ellipse position */ #define ABS_MT_TOOL_TYPE 0x37 /* Type of touching device */ #define ABS_MT_BLOB_ID 0x38 /* Group a set of packets as a blob */ +#define ABS_MT_TRACKING_ID 0x39 /* Unique ID of initiated contact */ #define ABS_MAX 0x3f #define ABS_CNT (ABS_MAX+1) diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 186ec6ab334..a47c879e130 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -1097,6 +1097,32 @@ unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long); #define pfn_valid_within(pfn) (1) #endif +#ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL +/* + * pfn_valid() is meant to be able to tell if a given PFN has valid memmap 
+ * associated with it or not. In FLATMEM, it is expected that holes always + * have valid memmap as long as there is valid PFNs either side of the hole. + * In SPARSEMEM, it is assumed that a valid section has a memmap for the + * entire section. + * + * However, an ARM, and maybe other embedded architectures in the future + * free memmap backing holes to save memory on the assumption the memmap is + * never used. The page_zone linkages are then broken even though pfn_valid() + * returns true. A walker of the full memmap must then do this additional + * check to ensure the memmap they are looking at is sane by making sure + * the zone and PFN linkages are still valid. This is expensive, but walkers + * of the full memmap are extremely rare. + */ +int memmap_valid_within(unsigned long pfn, + struct page *page, struct zone *zone); +#else +static inline int memmap_valid_within(unsigned long pfn, + struct page *page, struct zone *zone) +{ + return 1; +} +#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */ + #endif /* !__GENERATING_BOUNDS.H */ #endif /* !__ASSEMBLY__ */ #endif /* _LINUX_MMZONE_H */ diff --git a/include/linux/namei.h b/include/linux/namei.h index fc2e0357987..518098fe63a 100644 --- a/include/linux/namei.h +++ b/include/linux/namei.h @@ -69,7 +69,6 @@ extern int path_lookup(const char *, unsigned, struct nameidata *); extern int vfs_path_lookup(struct dentry *, struct vfsmount *, const char *, unsigned int, struct nameidata *); -extern int path_lookup_open(int dfd, const char *name, unsigned lookup_flags, struct nameidata *, int open_flags); extern struct file *lookup_instantiate_filp(struct nameidata *nd, struct dentry *dentry, int (*open)(struct inode *, struct file *)); extern struct file *nameidata_to_filp(struct nameidata *nd, int flags); diff --git a/include/linux/net_dropmon.h b/include/linux/net_dropmon.h index 0217fb81a63..0e2e100c44a 100644 --- a/include/linux/net_dropmon.h +++ b/include/linux/net_dropmon.h @@ -1,6 +1,7 @@ #ifndef __NET_DROPMON_H #define __NET_DROPMON_H +#include #include struct net_dm_drop_point { diff --git a/include/linux/netfilter/nf_conntrack_tcp.h b/include/linux/netfilter/nf_conntrack_tcp.h index 3066789b972..b2f384d4261 100644 --- a/include/linux/netfilter/nf_conntrack_tcp.h +++ b/include/linux/netfilter/nf_conntrack_tcp.h @@ -35,6 +35,9 @@ enum tcp_conntrack { /* Has unacknowledged data */ #define IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED 0x10 +/* The field td_maxack has been set */ +#define IP_CT_TCP_FLAG_MAXACK_SET 0x20 + struct nf_ct_tcp_flags { __u8 flags; __u8 mask; @@ -46,6 +49,7 @@ struct ip_ct_tcp_state { u_int32_t td_end; /* max of seq + len */ u_int32_t td_maxend; /* max of ack + max(win, 1) */ u_int32_t td_maxwin; /* max(win) */ + u_int32_t td_maxack; /* max of ack */ u_int8_t td_scale; /* window scale factor */ u_int8_t flags; /* per direction options */ }; diff --git a/include/linux/netfilter/xt_LED.h b/include/linux/netfilter/xt_LED.h index 4c91a0d770d..f5509e7524d 100644 --- a/include/linux/netfilter/xt_LED.h +++ b/include/linux/netfilter/xt_LED.h @@ -1,6 +1,8 @@ #ifndef _XT_LED_H #define _XT_LED_H +#include + struct xt_led_info { char id[27]; /* Unique ID for this trigger in the LED class */ __u8 always_blink; /* Blink even if the LED is already on */ diff --git a/include/linux/netfilter/xt_cluster.h b/include/linux/netfilter/xt_cluster.h index 5e0a0d07b52..886682656f0 100644 --- a/include/linux/netfilter/xt_cluster.h +++ b/include/linux/netfilter/xt_cluster.h @@ -12,4 +12,6 @@ struct xt_cluster_match_info { u_int32_t flags; }; +#define 
XT_CLUSTER_NODES_MAX 32 + #endif /* _XT_CLUSTER_MATCH_H */ diff --git a/include/linux/parport.h b/include/linux/parport.h index e1f83c5065c..38a423ed3c0 100644 --- a/include/linux/parport.h +++ b/include/linux/parport.h @@ -324,6 +324,10 @@ struct parport { int spintime; atomic_t ref_count; + unsigned long devflags; +#define PARPORT_DEVPROC_REGISTERED 0 + struct pardevice *proc_device; /* Currently register proc device */ + struct list_head full_list; struct parport *slaves[3]; }; diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 06ba90c211a..0f71812d67d 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -1406,7 +1406,7 @@ #define PCI_DEVICE_ID_VIA_82C598_1 0x8598 #define PCI_DEVICE_ID_VIA_838X_1 0xB188 #define PCI_DEVICE_ID_VIA_83_87XX_1 0xB198 -#define PCI_DEVICE_ID_VIA_C409_IDE 0XC409 +#define PCI_DEVICE_ID_VIA_VX855_IDE 0xC409 #define PCI_DEVICE_ID_VIA_ANON 0xFFFF #define PCI_VENDOR_ID_SIEMENS 0x110A diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h index 72736fd8223..b67bb5d7b22 100644 --- a/include/linux/platform_device.h +++ b/include/linux/platform_device.h @@ -20,7 +20,6 @@ struct platform_device { struct device dev; u32 num_resources; struct resource * resource; - void *platform_data; struct platform_device_id *id_entry; }; diff --git a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h index 6b361d23a49..6473650c28f 100644 --- a/include/linux/reiserfs_fs_sb.h +++ b/include/linux/reiserfs_fs_sb.h @@ -402,7 +402,7 @@ struct reiserfs_sb_info { int reserved_blocks; /* amount of blocks reserved for further allocations */ spinlock_t bitmap_lock; /* this lock on now only used to protect reserved_blocks variable */ struct dentry *priv_root; /* root of /.reiserfs_priv */ - struct dentry *xattr_root; /* root of /.reiserfs_priv/.xa */ + struct dentry *xattr_root; /* root of /.reiserfs_priv/xattrs */ int j_errno; #ifdef CONFIG_QUOTA char *s_qf_names[MAXQUOTAS]; @@ -488,7 +488,6 @@ enum reiserfs_mount_options { #define reiserfs_data_log(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_DATA_LOG)) #define reiserfs_data_ordered(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_DATA_ORDERED)) #define reiserfs_data_writeback(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_DATA_WRITEBACK)) -#define reiserfs_xattrs(s) ((s)->s_xattr != NULL) #define reiserfs_xattrs_user(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_XATTRS_USER)) #define reiserfs_posixacl(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_POSIXACL)) #define reiserfs_xattrs_optional(s) (reiserfs_xattrs_user(s) || reiserfs_posixacl(s)) diff --git a/include/linux/reiserfs_xattr.h b/include/linux/reiserfs_xattr.h index dcae01e63e4..99928dce37e 100644 --- a/include/linux/reiserfs_xattr.h +++ b/include/linux/reiserfs_xattr.h @@ -38,8 +38,10 @@ struct nameidata; int reiserfs_xattr_register_handlers(void) __init; void reiserfs_xattr_unregister_handlers(void); int reiserfs_xattr_init(struct super_block *sb, int mount_flags); +int reiserfs_lookup_privroot(struct super_block *sb); int reiserfs_delete_xattrs(struct inode *inode); int reiserfs_chown_xattrs(struct inode *inode, struct iattr *attrs); +int reiserfs_permission(struct inode *inode, int mask); #ifdef CONFIG_REISERFS_FS_XATTR #define has_xattr_dir(inode) (REISERFS_I(inode)->i_flags & i_has_xattr_dir) @@ -49,7 +51,6 @@ int reiserfs_setxattr(struct dentry *dentry, const char *name, const void *value, size_t size, int flags); ssize_t reiserfs_listxattr(struct dentry *dentry, char *buffer, size_t size); int 
reiserfs_removexattr(struct dentry *dentry, const char *name); -int reiserfs_permission(struct inode *inode, int mask); int reiserfs_xattr_get(struct inode *, const char *, void *, size_t); int reiserfs_xattr_set(struct inode *, const char *, const void *, size_t, int); @@ -97,7 +98,7 @@ static inline size_t reiserfs_xattr_jcreate_nblocks(struct inode *inode) if ((REISERFS_I(inode)->i_flags & i_has_xattr_dir) == 0) { nblocks += JOURNAL_BLOCKS_PER_OBJECT(inode->i_sb); - if (REISERFS_SB(inode->i_sb)->xattr_root == NULL) + if (!REISERFS_SB(inode->i_sb)->xattr_root->d_inode) nblocks += JOURNAL_BLOCKS_PER_OBJECT(inode->i_sb); } @@ -116,8 +117,6 @@ static inline void reiserfs_init_xattr_rwsem(struct inode *inode) #define reiserfs_listxattr NULL #define reiserfs_removexattr NULL -#define reiserfs_permission NULL - static inline void reiserfs_init_xattr_rwsem(struct inode *inode) { } diff --git a/include/linux/romfs_fs.h b/include/linux/romfs_fs.h index e20bbf9eb36..c490fbc43fe 100644 --- a/include/linux/romfs_fs.h +++ b/include/linux/romfs_fs.h @@ -53,9 +53,4 @@ struct romfs_inode { #define ROMFH_PAD (ROMFH_SIZE-1) #define ROMFH_MASK (~ROMFH_PAD) -#ifdef __KERNEL__ - -/* Not much now */ - -#endif /* __KERNEL__ */ #endif diff --git a/include/linux/swap.h b/include/linux/swap.h index 62d81435347..d476aad3ff5 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -437,6 +437,11 @@ static inline int mem_cgroup_cache_charge_swapin(struct page *page, return 0; } +static inline void +mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent) +{ +} + #endif /* CONFIG_SWAP */ #endif /* __KERNEL__*/ #endif /* _LINUX_SWAP_H */ diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 40617c1d897..30520844b8d 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -433,6 +433,7 @@ asmlinkage long sys_fcntl(unsigned int fd, unsigned int cmd, unsigned long arg); asmlinkage long sys_fcntl64(unsigned int fd, unsigned int cmd, unsigned long arg); #endif +asmlinkage long sys_pipe2(int __user *fildes, int flags); asmlinkage long sys_dup(unsigned int fildes); asmlinkage long sys_dup2(unsigned int oldfd, unsigned int newfd); asmlinkage long sys_dup3(unsigned int oldfd, unsigned int newfd, int flags); diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h index c7aa154f4bf..eb96603d92d 100644 --- a/include/linux/tracehook.h +++ b/include/linux/tracehook.h @@ -259,14 +259,12 @@ static inline void tracehook_finish_clone(struct task_struct *child, /** * tracehook_report_clone - in parent, new child is about to start running - * @trace: return value from tracehook_prepare_clone() * @regs: parent's user register state * @clone_flags: flags from parent's system call * @pid: new child's PID in the parent's namespace * @child: new child task * - * Called after a child is set up, but before it has been started - * running. @trace is the value returned by tracehook_prepare_clone(). + * Called after a child is set up, but before it has been started running. * This is not a good place to block, because the child has not started * yet. Suspend the child here if desired, and then block in * tracehook_report_clone_complete(). This must prevent the child from @@ -276,13 +274,14 @@ static inline void tracehook_finish_clone(struct task_struct *child, * * Called with no locks held, but the child cannot run until this returns. 
*/ -static inline void tracehook_report_clone(int trace, struct pt_regs *regs, +static inline void tracehook_report_clone(struct pt_regs *regs, unsigned long clone_flags, pid_t pid, struct task_struct *child) { - if (unlikely(trace) || unlikely(clone_flags & CLONE_PTRACE)) { + if (unlikely(task_ptrace(child))) { /* - * The child starts up with an immediate SIGSTOP. + * It doesn't matter who attached/attaching to this + * task, the pending SIGSTOP is right in any case. */ sigaddset(&child->pending.signal, SIGSTOP); set_tsk_thread_flag(child, TIF_SIGPENDING); diff --git a/include/linux/writeback.h b/include/linux/writeback.h index 9c1ed1fb6dd..93445477f86 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h @@ -168,8 +168,6 @@ void writeback_set_ratelimit(void); /* pdflush.c */ extern int nr_pdflush_threads; /* Global so it can be exported to sysctl read-only. */ -extern int nr_pdflush_threads_max; /* Global so it can be exported to sysctl */ -extern int nr_pdflush_threads_min; /* Global so it can be exported to sysctl */ #endif /* WRITEBACK_H */ diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h index c9184f756ca..68a8d873bbd 100644 --- a/include/scsi/scsi_transport_fc.h +++ b/include/scsi/scsi_transport_fc.h @@ -680,7 +680,7 @@ fc_remote_port_chkready(struct fc_rport *rport) if (rport->roles & FC_PORT_ROLE_FCP_TARGET) result = 0; else if (rport->flags & FC_RPORT_DEVLOSS_PENDING) - result = DID_TRANSPORT_DISRUPTED << 16; + result = DID_IMM_RETRY << 16; else result = DID_NO_CONNECT << 16; break; @@ -688,7 +688,7 @@ fc_remote_port_chkready(struct fc_rport *rport) if (rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT) result = DID_TRANSPORT_FAILFAST << 16; else - result = DID_TRANSPORT_DISRUPTED << 16; + result = DID_IMM_RETRY << 16; break; default: result = DID_NO_CONNECT << 16; diff --git a/include/sound/version.h b/include/sound/version.h index a7e74e23ad2..456f1359e1c 100644 --- a/include/sound/version.h +++ b/include/sound/version.h @@ -1,3 +1,3 @@ /* include/version.h */ -#define CONFIG_SND_VERSION "1.0.19" +#define CONFIG_SND_VERSION "1.0.20" #define CONFIG_SND_DATE "" diff --git a/init/main.c b/init/main.c index 3bbf93be744..d721dad05dd 100644 --- a/init/main.c +++ b/init/main.c @@ -566,8 +566,7 @@ asmlinkage void __init start_kernel(void) tick_init(); boot_cpu_init(); page_address_init(); - printk(KERN_NOTICE); - printk(linux_banner); + printk(KERN_NOTICE "%s", linux_banner); setup_arch(&command_line); mm_init_owner(&init_mm, &init_task); setup_command_line(command_line); diff --git a/ipc/shm.c b/ipc/shm.c index faa46da99eb..42597160048 100644 --- a/ipc/shm.c +++ b/ipc/shm.c @@ -969,10 +969,13 @@ SYSCALL_DEFINE3(shmat, int, shmid, char __user *, shmaddr, int, shmflg) SYSCALL_DEFINE1(shmdt, char __user *, shmaddr) { struct mm_struct *mm = current->mm; - struct vm_area_struct *vma, *next; + struct vm_area_struct *vma; unsigned long addr = (unsigned long)shmaddr; - loff_t size = 0; int retval = -EINVAL; +#ifdef CONFIG_MMU + loff_t size = 0; + struct vm_area_struct *next; +#endif if (addr & ~PAGE_MASK) return retval; diff --git a/kernel/async.c b/kernel/async.c index 968ef9457d4..27235f5de19 100644 --- a/kernel/async.c +++ b/kernel/async.c @@ -92,19 +92,18 @@ extern int initcall_debug; static async_cookie_t __lowest_in_progress(struct list_head *running) { struct async_entry *entry; + if (!list_empty(running)) { entry = list_first_entry(running, struct async_entry, list); return entry->cookie; - } else if (!list_empty(&async_pending)) { - entry = 
list_first_entry(&async_pending, - struct async_entry, list); - return entry->cookie; - } else { - /* nothing in progress... next_cookie is "infinity" */ - return next_cookie; } + list_for_each_entry(entry, &async_pending, list) + if (entry->running == running) + return entry->cookie; + + return next_cookie; /* "infinity" value */ } static async_cookie_t lowest_in_progress(struct list_head *running) diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 382109b5bae..a7267bfd376 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -1133,8 +1133,7 @@ static int cgroup_get_sb(struct file_system_type *fs_type, free_cg_links: free_cg_links(&tmp_cg_links); drop_new_super: - up_write(&sb->s_umount); - deactivate_super(sb); + deactivate_locked_super(sb); return ret; } diff --git a/kernel/fork.c b/kernel/fork.c index b9e2edd0072..875ffbdd96d 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1409,7 +1409,7 @@ long do_fork(unsigned long clone_flags, } audit_finish_fork(p); - tracehook_report_clone(trace, regs, clone_flags, nr, p); + tracehook_report_clone(regs, clone_flags, nr, p); /* * We set PF_STARTING at creation in case tracing wants to diff --git a/kernel/futex.c b/kernel/futex.c index eef8cd26b5e..d546b2d53a6 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -193,6 +193,7 @@ static void drop_futex_key_refs(union futex_key *key) * @uaddr: virtual address of the futex * @fshared: 0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED * @key: address where result is stored. + * @rw: mapping needs to be read/write (values: VERIFY_READ, VERIFY_WRITE) * * Returns a negative error code or 0 * The key words are stored in *key on success. @@ -203,7 +204,8 @@ static void drop_futex_key_refs(union futex_key *key) * * lock_page() might sleep, the caller should not hold a spinlock. 
*/ -static int get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key) +static int +get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw) { unsigned long address = (unsigned long)uaddr; struct mm_struct *mm = current->mm; @@ -226,7 +228,7 @@ static int get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key) * but access_ok() should be faster than find_vma() */ if (!fshared) { - if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))) + if (unlikely(!access_ok(rw, uaddr, sizeof(u32)))) return -EFAULT; key->private.mm = mm; key->private.address = address; @@ -235,7 +237,7 @@ static int get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key) } again: - err = get_user_pages_fast(address, 1, 0, &page); + err = get_user_pages_fast(address, 1, rw == VERIFY_WRITE, &page); if (err < 0) return err; @@ -677,7 +679,7 @@ static int futex_wake(u32 __user *uaddr, int fshared, int nr_wake, u32 bitset) if (!bitset) return -EINVAL; - ret = get_futex_key(uaddr, fshared, &key); + ret = get_futex_key(uaddr, fshared, &key, VERIFY_READ); if (unlikely(ret != 0)) goto out; @@ -723,10 +725,10 @@ futex_wake_op(u32 __user *uaddr1, int fshared, u32 __user *uaddr2, int ret, op_ret; retry: - ret = get_futex_key(uaddr1, fshared, &key1); + ret = get_futex_key(uaddr1, fshared, &key1, VERIFY_READ); if (unlikely(ret != 0)) goto out; - ret = get_futex_key(uaddr2, fshared, &key2); + ret = get_futex_key(uaddr2, fshared, &key2, VERIFY_WRITE); if (unlikely(ret != 0)) goto out_put_key1; @@ -814,10 +816,10 @@ static int futex_requeue(u32 __user *uaddr1, int fshared, u32 __user *uaddr2, int ret, drop_count = 0; retry: - ret = get_futex_key(uaddr1, fshared, &key1); + ret = get_futex_key(uaddr1, fshared, &key1, VERIFY_READ); if (unlikely(ret != 0)) goto out; - ret = get_futex_key(uaddr2, fshared, &key2); + ret = get_futex_key(uaddr2, fshared, &key2, VERIFY_READ); if (unlikely(ret != 0)) goto out_put_key1; @@ -1140,7 +1142,7 @@ static int futex_wait(u32 __user *uaddr, int fshared, q.bitset = bitset; retry: q.key = FUTEX_KEY_INIT; - ret = get_futex_key(uaddr, fshared, &q.key); + ret = get_futex_key(uaddr, fshared, &q.key, VERIFY_READ); if (unlikely(ret != 0)) goto out; @@ -1330,7 +1332,7 @@ static int futex_lock_pi(u32 __user *uaddr, int fshared, q.pi_state = NULL; retry: q.key = FUTEX_KEY_INIT; - ret = get_futex_key(uaddr, fshared, &q.key); + ret = get_futex_key(uaddr, fshared, &q.key, VERIFY_WRITE); if (unlikely(ret != 0)) goto out; @@ -1594,7 +1596,7 @@ retry: if ((uval & FUTEX_TID_MASK) != task_pid_vnr(current)) return -EPERM; - ret = get_futex_key(uaddr, fshared, &key); + ret = get_futex_key(uaddr, fshared, &key, VERIFY_WRITE); if (unlikely(ret != 0)) goto out; diff --git a/kernel/kexec.c b/kernel/kexec.c index 5a758c6e495..e4983770913 100644 --- a/kernel/kexec.c +++ b/kernel/kexec.c @@ -1451,7 +1451,6 @@ int kernel_kexec(void) error = device_suspend(PMSG_FREEZE); if (error) goto Resume_console; - device_pm_lock(); /* At this point, device_suspend() has been called, * but *not* device_power_down(). We *must* * device_power_down() now. 
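The futex.c changes above thread an access-mode argument through get_futex_key() so that read-only operations no longer require a writable mapping; get_user_pages_fast() is asked for write access only when rw is VERIFY_WRITE. Shown schematically (callers and error handling elided, as in the hunks above):

    /* Wait/wake/requeue style ops only read the futex word. */
    ret = get_futex_key(uaddr, fshared, &key, VERIFY_READ);

    /* Ops that store to the word (PI lock/unlock, the wake_op target) ask for write. */
    ret = get_futex_key(uaddr2, fshared, &key2, VERIFY_WRITE);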
Otherwise, drivers for @@ -1489,7 +1488,6 @@ int kernel_kexec(void) enable_nonboot_cpus(); device_power_up(PMSG_RESTORE); Resume_devices: - device_pm_unlock(); device_resume(PMSG_RESTORE); Resume_console: resume_console(); diff --git a/kernel/kgdb.c b/kernel/kgdb.c index e4dcfb2272a..9147a3190c9 100644 --- a/kernel/kgdb.c +++ b/kernel/kgdb.c @@ -1583,8 +1583,8 @@ static void sysrq_handle_gdb(int key, struct tty_struct *tty) static struct sysrq_key_op sysrq_gdb_op = { .handler = sysrq_handle_gdb, - .help_msg = "Gdb", - .action_msg = "GDB", + .help_msg = "debug(G)", + .action_msg = "DEBUG", }; #endif diff --git a/kernel/kmod.c b/kernel/kmod.c index b750675251e..7e95bedb2bf 100644 --- a/kernel/kmod.c +++ b/kernel/kmod.c @@ -370,8 +370,10 @@ struct subprocess_info *call_usermodehelper_setup(char *path, char **argv, sub_info->argv = argv; sub_info->envp = envp; sub_info->cred = prepare_usermodehelper_creds(); - if (!sub_info->cred) + if (!sub_info->cred) { + kfree(sub_info); return NULL; + } out: return sub_info; diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h index a2cc7e9a6e8..699a2ac3a0d 100644 --- a/kernel/lockdep_internals.h +++ b/kernel/lockdep_internals.h @@ -54,9 +54,9 @@ enum { * table (if it's not there yet), and we check it for lock order * conflicts and deadlocks. */ -#define MAX_LOCKDEP_ENTRIES 8192UL +#define MAX_LOCKDEP_ENTRIES 16384UL -#define MAX_LOCKDEP_CHAINS_BITS 14 +#define MAX_LOCKDEP_CHAINS_BITS 15 #define MAX_LOCKDEP_CHAINS (1UL << MAX_LOCKDEP_CHAINS_BITS) #define MAX_LOCKDEP_CHAIN_HLOCKS (MAX_LOCKDEP_CHAINS*5) diff --git a/kernel/panic.c b/kernel/panic.c index 874ecf1307a..984b3ecbd72 100644 --- a/kernel/panic.c +++ b/kernel/panic.c @@ -340,39 +340,44 @@ void oops_exit(void) } #ifdef WANT_WARN_ON_SLOWPATH -void warn_slowpath_fmt(const char *file, int line, const char *fmt, ...) -{ +struct slowpath_args { + const char *fmt; va_list args; - char function[KSYM_SYMBOL_LEN]; - unsigned long caller = (unsigned long)__builtin_return_address(0); +}; + +static void warn_slowpath_common(const char *file, int line, void *caller, struct slowpath_args *args) +{ const char *board; - sprint_symbol(function, caller); - printk(KERN_WARNING "------------[ cut here ]------------\n"); - printk(KERN_WARNING "WARNING: at %s:%d %s()\n", file, - line, function); + printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller); board = dmi_get_system_info(DMI_PRODUCT_NAME); if (board) printk(KERN_WARNING "Hardware name: %s\n", board); - if (*fmt) { - va_start(args, fmt); - vprintk(fmt, args); - va_end(args); - } + if (args) + vprintk(args->fmt, args->args); print_modules(); dump_stack(); print_oops_end_marker(); add_taint(TAINT_WARN); } + +void warn_slowpath_fmt(const char *file, int line, const char *fmt, ...) 
+{ + struct slowpath_args args; + + args.fmt = fmt; + va_start(args.args, fmt); + warn_slowpath_common(file, line, __builtin_return_address(0), &args); + va_end(args.args); +} EXPORT_SYMBOL(warn_slowpath_fmt); void warn_slowpath_null(const char *file, int line) { - static const char *empty = ""; - warn_slowpath_fmt(file, line, empty); + warn_slowpath_common(file, line, __builtin_return_address(0), NULL); } EXPORT_SYMBOL(warn_slowpath_null); #endif diff --git a/kernel/power/disk.c b/kernel/power/disk.c index e71ca9cd81b..5cb080e7eeb 100644 --- a/kernel/power/disk.c +++ b/kernel/power/disk.c @@ -215,8 +215,6 @@ static int create_image(int platform_mode) if (error) return error; - device_pm_lock(); - /* At this point, device_suspend() has been called, but *not* * device_power_down(). We *must* call device_power_down() now. * Otherwise, drivers for some devices (e.g. interrupt controllers) @@ -227,7 +225,7 @@ static int create_image(int platform_mode) if (error) { printk(KERN_ERR "PM: Some devices failed to power down, " "aborting hibernation\n"); - goto Unlock; + return error; } error = platform_pre_snapshot(platform_mode); @@ -241,9 +239,9 @@ static int create_image(int platform_mode) local_irq_disable(); - sysdev_suspend(PMSG_FREEZE); + error = sysdev_suspend(PMSG_FREEZE); if (error) { - printk(KERN_ERR "PM: Some devices failed to power down, " + printk(KERN_ERR "PM: Some system devices failed to power down, " "aborting hibernation\n"); goto Enable_irqs; } @@ -280,9 +278,6 @@ static int create_image(int platform_mode) device_power_up(in_suspend ? (error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE); - Unlock: - device_pm_unlock(); - return error; } @@ -344,13 +339,11 @@ static int resume_target_kernel(bool platform_mode) { int error; - device_pm_lock(); - error = device_power_down(PMSG_QUIESCE); if (error) { printk(KERN_ERR "PM: Some devices failed to power down, " "aborting resume\n"); - goto Unlock; + return error; } error = platform_pre_restore(platform_mode); @@ -403,9 +396,6 @@ static int resume_target_kernel(bool platform_mode) device_power_up(PMSG_RECOVER); - Unlock: - device_pm_unlock(); - return error; } @@ -464,11 +454,9 @@ int hibernation_platform_enter(void) goto Resume_devices; } - device_pm_lock(); - error = device_power_down(PMSG_HIBERNATE); if (error) - goto Unlock; + goto Resume_devices; error = hibernation_ops->prepare(); if (error) @@ -493,9 +481,6 @@ int hibernation_platform_enter(void) device_power_up(PMSG_RESTORE); - Unlock: - device_pm_unlock(); - Resume_devices: entering_platform_hibernation = false; device_resume(PMSG_RESTORE); diff --git a/kernel/power/main.c b/kernel/power/main.c index f99ed6a75ea..868028280d1 100644 --- a/kernel/power/main.c +++ b/kernel/power/main.c @@ -289,12 +289,10 @@ static int suspend_enter(suspend_state_t state) { int error; - device_pm_lock(); - if (suspend_ops->prepare) { error = suspend_ops->prepare(); if (error) - goto Done; + return error; } error = device_power_down(PMSG_SUSPEND); @@ -343,9 +341,6 @@ static int suspend_enter(suspend_state_t state) if (suspend_ops->finish) suspend_ops->finish(); - Done: - device_pm_unlock(); - return error; } diff --git a/kernel/ptrace.c b/kernel/ptrace.c index 0692ab5a0d6..42c317874cf 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c @@ -304,6 +304,8 @@ int ptrace_detach(struct task_struct *child, unsigned int data) if (child->ptrace) { child->exit_code = data; dead = __ptrace_detach(current, child); + if (!child->exit_state) + wake_up_process(child); } write_unlock_irq(&tasklist_lock); diff --git 
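The kernel/panic.c rework above folds warn_slowpath_fmt() and warn_slowpath_null() into a single warn_slowpath_common() by carrying the va_list inside a small struct, so the argument-free entry point can simply pass NULL. A self-contained userspace analogue of that pattern (names here are illustrative, not kernel API):

    #include <stdarg.h>
    #include <stdio.h>

    struct slowpath_args {
            const char *fmt;
            va_list args;
    };

    static void warn_common(const char *file, int line, struct slowpath_args *args)
    {
            fprintf(stderr, "WARNING: at %s:%d\n", file, line);
            if (args)
                    vfprintf(stderr, args->fmt, args->args);
    }

    static void warn_fmt(const char *file, int line, const char *fmt, ...)
    {
            struct slowpath_args args;

            args.fmt = fmt;
            va_start(args.args, fmt);
            warn_common(file, line, &args);
            va_end(args.args);
    }

    static void warn_null(const char *file, int line)
    {
            warn_common(file, line, NULL);  /* nothing to format */
    }

    int main(void)
    {
            warn_fmt(__FILE__, __LINE__, "value=%d\n", 42);
            warn_null(__FILE__, __LINE__);
            return 0;
    }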
a/kernel/sched_clock.c b/kernel/sched_clock.c index 819f17ac796..e1d16c9a768 100644 --- a/kernel/sched_clock.c +++ b/kernel/sched_clock.c @@ -38,7 +38,8 @@ */ unsigned long long __attribute__((weak)) sched_clock(void) { - return (unsigned long long)jiffies * (NSEC_PER_SEC / HZ); + return (unsigned long long)(jiffies - INITIAL_JIFFIES) + * (NSEC_PER_SEC / HZ); } static __read_mostly int sched_clock_running; diff --git a/kernel/sched_cpupri.c b/kernel/sched_cpupri.c index cdd3c89574c..344712a5e3e 100644 --- a/kernel/sched_cpupri.c +++ b/kernel/sched_cpupri.c @@ -165,7 +165,7 @@ int __init_refok cpupri_init(struct cpupri *cp, bool bootmem) vec->count = 0; if (bootmem) alloc_bootmem_cpumask_var(&vec->mask); - else if (!alloc_cpumask_var(&vec->mask, GFP_KERNEL)) + else if (!zalloc_cpumask_var(&vec->mask, GFP_KERNEL)) goto cleanup; } diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c index f2c66f8f971..9bf0d2a7304 100644 --- a/kernel/sched_rt.c +++ b/kernel/sched_rt.c @@ -1591,7 +1591,7 @@ static inline void init_sched_rt_class(void) unsigned int i; for_each_possible_cpu(i) - alloc_cpumask_var_node(&per_cpu(local_cpu_mask, i), + zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i), GFP_KERNEL, cpu_to_node(i)); } #endif /* CONFIG_SMP */ diff --git a/kernel/smp.c b/kernel/smp.c index 858baac568e..ad63d850120 100644 --- a/kernel/smp.c +++ b/kernel/smp.c @@ -52,7 +52,7 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu) switch (action) { case CPU_UP_PREPARE: case CPU_UP_PREPARE_FROZEN: - if (!alloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL, + if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL, cpu_to_node(cpu))) return NOTIFY_BAD; break; diff --git a/kernel/sysctl.c b/kernel/sysctl.c index ea78fa101ad..b2970d56fb7 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -101,7 +101,6 @@ static int __maybe_unused one = 1; static int __maybe_unused two = 2; static unsigned long one_ul = 1; static int one_hundred = 100; -static int one_thousand = 1000; /* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */ static unsigned long dirty_bytes_min = 2 * PAGE_SIZE; @@ -1033,28 +1032,6 @@ static struct ctl_table vm_table[] = { .mode = 0444 /* read-only*/, .proc_handler = &proc_dointvec, }, - { - .ctl_name = CTL_UNNUMBERED, - .procname = "nr_pdflush_threads_min", - .data = &nr_pdflush_threads_min, - .maxlen = sizeof nr_pdflush_threads_min, - .mode = 0644 /* read-write */, - .proc_handler = &proc_dointvec_minmax, - .strategy = &sysctl_intvec, - .extra1 = &one, - .extra2 = &nr_pdflush_threads_max, - }, - { - .ctl_name = CTL_UNNUMBERED, - .procname = "nr_pdflush_threads_max", - .data = &nr_pdflush_threads_max, - .maxlen = sizeof nr_pdflush_threads_max, - .mode = 0644 /* read-write */, - .proc_handler = &proc_dointvec_minmax, - .strategy = &sysctl_intvec, - .extra1 = &nr_pdflush_threads_min, - .extra2 = &one_thousand, - }, { .ctl_name = VM_SWAPPINESS, .procname = "swappiness", diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index a884c09006c..cda81ec58d9 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -2380,7 +2380,7 @@ static const char readme_msg[] = "# echo print-parent > /debug/tracing/trace_options\n" "# echo 1 > /debug/tracing/tracing_enabled\n" "# cat /debug/tracing/trace > /tmp/trace.txt\n" - "echo 0 > /debug/tracing/tracing_enabled\n" + "# echo 0 > /debug/tracing/tracing_enabled\n" ; static ssize_t diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 812c28207ba..6cdcf38f2da 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ 
-891,7 +891,6 @@ config DYNAMIC_DEBUG default n depends on PRINTK depends on DEBUG_FS - select PRINTK_DEBUG help Compiles debug level messages into the kernel, which would not diff --git a/lib/cpumask.c b/lib/cpumask.c index 1f71b97de0f..eb23aaa0c7b 100644 --- a/lib/cpumask.c +++ b/lib/cpumask.c @@ -119,6 +119,12 @@ bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node) } EXPORT_SYMBOL(alloc_cpumask_var_node); +bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node) +{ + return alloc_cpumask_var_node(mask, flags | __GFP_ZERO, node); +} +EXPORT_SYMBOL(zalloc_cpumask_var_node); + /** * alloc_cpumask_var - allocate a struct cpumask * @mask: pointer to cpumask_var_t where the cpumask is returned @@ -135,6 +141,12 @@ bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags) } EXPORT_SYMBOL(alloc_cpumask_var); +bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags) +{ + return alloc_cpumask_var(mask, flags | __GFP_ZERO); +} +EXPORT_SYMBOL(zalloc_cpumask_var); + /** * alloc_bootmem_cpumask_var - allocate a struct cpumask from the bootmem arena. * @mask: pointer to cpumask_var_t where the cpumask is returned diff --git a/mm/filemap.c b/mm/filemap.c index 379ff0bcbf6..1b60f30cebf 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -121,7 +121,6 @@ void __remove_from_page_cache(struct page *page) mapping->nrpages--; __dec_zone_page_state(page, NR_FILE_PAGES); BUG_ON(page_mapped(page)); - mem_cgroup_uncharge_cache_page(page); /* * Some filesystems seem to re-dirty the page even after @@ -145,6 +144,7 @@ void remove_from_page_cache(struct page *page) spin_lock_irq(&mapping->tree_lock); __remove_from_page_cache(page); spin_unlock_irq(&mapping->tree_lock); + mem_cgroup_uncharge_cache_page(page); } static int sync_page(void *word) @@ -476,13 +476,13 @@ int add_to_page_cache_locked(struct page *page, struct address_space *mapping, if (likely(!error)) { mapping->nrpages++; __inc_zone_page_state(page, NR_FILE_PAGES); + spin_unlock_irq(&mapping->tree_lock); } else { page->mapping = NULL; + spin_unlock_irq(&mapping->tree_lock); mem_cgroup_uncharge_cache_page(page); page_cache_release(page); } - - spin_unlock_irq(&mapping->tree_lock); radix_tree_preload_end(); } else mem_cgroup_uncharge_cache_page(page); diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 28c655ba935..e83ad2c9228 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -316,7 +316,7 @@ static void resv_map_release(struct kref *ref) static struct resv_map *vma_resv_map(struct vm_area_struct *vma) { VM_BUG_ON(!is_vm_hugetlb_page(vma)); - if (!(vma->vm_flags & VM_SHARED)) + if (!(vma->vm_flags & VM_MAYSHARE)) return (struct resv_map *)(get_vma_private_data(vma) & ~HPAGE_RESV_MASK); return NULL; @@ -325,7 +325,7 @@ static struct resv_map *vma_resv_map(struct vm_area_struct *vma) static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map) { VM_BUG_ON(!is_vm_hugetlb_page(vma)); - VM_BUG_ON(vma->vm_flags & VM_SHARED); + VM_BUG_ON(vma->vm_flags & VM_MAYSHARE); set_vma_private_data(vma, (get_vma_private_data(vma) & HPAGE_RESV_MASK) | (unsigned long)map); @@ -334,7 +334,7 @@ static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map) static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags) { VM_BUG_ON(!is_vm_hugetlb_page(vma)); - VM_BUG_ON(vma->vm_flags & VM_SHARED); + VM_BUG_ON(vma->vm_flags & VM_MAYSHARE); set_vma_private_data(vma, get_vma_private_data(vma) | flags); } @@ -353,7 +353,7 @@ static void decrement_hugepage_resv_vma(struct hstate *h, if (vma->vm_flags & 
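The lib/cpumask.c hunks above add zalloc_cpumask_var() and zalloc_cpumask_var_node(), implemented as the existing allocators with __GFP_ZERO or'ed into the gfp flags; the sched_cpupri, sched_rt and kernel/smp.c conversions earlier in the patch switch to them so the masks are guaranteed to start out empty. Schematically, the two forms below are effectively equivalent (kernel-context fragment, error handling as in the original callers):

    cpumask_var_t mask;

    if (!alloc_cpumask_var(&mask, GFP_KERNEL))      /* old: allocate ... */
            return -ENOMEM;
    cpumask_clear(mask);                            /* ... then clear by hand */

    /* versus */

    if (!zalloc_cpumask_var(&mask, GFP_KERNEL))     /* new: arrives already cleared */
            return -ENOMEM;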
VM_NORESERVE) return; - if (vma->vm_flags & VM_SHARED) { + if (vma->vm_flags & VM_MAYSHARE) { /* Shared mappings always use reserves */ h->resv_huge_pages--; } else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) { @@ -369,14 +369,14 @@ static void decrement_hugepage_resv_vma(struct hstate *h, void reset_vma_resv_huge_pages(struct vm_area_struct *vma) { VM_BUG_ON(!is_vm_hugetlb_page(vma)); - if (!(vma->vm_flags & VM_SHARED)) + if (!(vma->vm_flags & VM_MAYSHARE)) vma->vm_private_data = (void *)0; } /* Returns true if the VMA has associated reserve pages */ static int vma_has_reserves(struct vm_area_struct *vma) { - if (vma->vm_flags & VM_SHARED) + if (vma->vm_flags & VM_MAYSHARE) return 1; if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) return 1; @@ -924,7 +924,7 @@ static long vma_needs_reservation(struct hstate *h, struct address_space *mapping = vma->vm_file->f_mapping; struct inode *inode = mapping->host; - if (vma->vm_flags & VM_SHARED) { + if (vma->vm_flags & VM_MAYSHARE) { pgoff_t idx = vma_hugecache_offset(h, vma, addr); return region_chg(&inode->i_mapping->private_list, idx, idx + 1); @@ -949,7 +949,7 @@ static void vma_commit_reservation(struct hstate *h, struct address_space *mapping = vma->vm_file->f_mapping; struct inode *inode = mapping->host; - if (vma->vm_flags & VM_SHARED) { + if (vma->vm_flags & VM_MAYSHARE) { pgoff_t idx = vma_hugecache_offset(h, vma, addr); region_add(&inode->i_mapping->private_list, idx, idx + 1); @@ -1893,7 +1893,7 @@ retry_avoidcopy: * at the time of fork() could consume its reserves on COW instead * of the full address range. */ - if (!(vma->vm_flags & VM_SHARED) && + if (!(vma->vm_flags & VM_MAYSHARE) && is_vma_resv_set(vma, HPAGE_RESV_OWNER) && old_page != pagecache_page) outside_reserve = 1; @@ -2000,7 +2000,7 @@ retry: clear_huge_page(page, address, huge_page_size(h)); __SetPageUptodate(page); - if (vma->vm_flags & VM_SHARED) { + if (vma->vm_flags & VM_MAYSHARE) { int err; struct inode *inode = mapping->host; @@ -2104,7 +2104,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, goto out_mutex; } - if (!(vma->vm_flags & VM_SHARED)) + if (!(vma->vm_flags & VM_MAYSHARE)) pagecache_page = hugetlbfs_pagecache_page(h, vma, address); } @@ -2289,7 +2289,7 @@ int hugetlb_reserve_pages(struct inode *inode, * to reserve the full area even if read-only as mprotect() may be * called to make the mapping read-write. Assume !vma is a shm mapping */ - if (!vma || vma->vm_flags & VM_SHARED) + if (!vma || vma->vm_flags & VM_MAYSHARE) chg = region_chg(&inode->i_mapping->private_list, from, to); else { struct resv_map *resv_map = resv_map_alloc(); @@ -2330,7 +2330,7 @@ int hugetlb_reserve_pages(struct inode *inode, * consumed reservations are stored in the map. Hence, nothing * else has to be done for private mappings here */ - if (!vma || vma->vm_flags & VM_SHARED) + if (!vma || vma->vm_flags & VM_MAYSHARE) region_add(&inode->i_mapping->private_list, from, to); return 0; } diff --git a/mm/madvise.c b/mm/madvise.c index 36d6ea2b634..b9ce574827c 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -112,14 +112,6 @@ static long madvise_willneed(struct vm_area_struct * vma, if (!file) return -EBADF; - /* - * Page cache readahead assumes page cache pages are order-0 which - * is not the case for hugetlbfs. Do not give a bad return value - * but ignore the advice. 
- */ - if (vma->vm_flags & VM_HUGETLB) - return 0; - if (file->f_mapping->a_ops->get_xip_mem) { /* no bad return value, but ignore advice */ return 0; diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 01c2d8f1468..78eb8552818 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -314,14 +314,6 @@ static struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm) return mem; } -static bool mem_cgroup_is_obsolete(struct mem_cgroup *mem) -{ - if (!mem) - return true; - return css_is_removed(&mem->css); -} - - /* * Call callback function against all cgroup under hierarchy tree. */ @@ -932,7 +924,7 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm, if (unlikely(!mem)) return 0; - VM_BUG_ON(!mem || mem_cgroup_is_obsolete(mem)); + VM_BUG_ON(css_is_removed(&mem->css)); while (1) { int ret; @@ -1488,8 +1480,9 @@ void mem_cgroup_uncharge_cache_page(struct page *page) __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE); } +#ifdef CONFIG_SWAP /* - * called from __delete_from_swap_cache() and drop "page" account. + * called after __delete_from_swap_cache() and drop "page" account. * memcg information is recorded to swap_cgroup of "ent" */ void mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent) @@ -1506,6 +1499,7 @@ void mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent) if (memcg) css_put(&memcg->css); } +#endif #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP /* diff --git a/mm/mmzone.c b/mm/mmzone.c index 16ce8b955dc..f5b7d176021 100644 --- a/mm/mmzone.c +++ b/mm/mmzone.c @@ -6,6 +6,7 @@ #include +#include #include #include @@ -72,3 +73,17 @@ struct zoneref *next_zones_zonelist(struct zoneref *z, *zone = zonelist_zone(z); return z; } + +#ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL +int memmap_valid_within(unsigned long pfn, + struct page *page, struct zone *zone) +{ + if (page_to_pfn(page) != pfn) + return 0; + + if (page_zone(page) != zone) + return 0; + + return 1; +} +#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */ diff --git a/mm/oom_kill.c b/mm/oom_kill.c index 92bcf1db16b..a7b2460e922 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -284,22 +284,28 @@ static void dump_tasks(const struct mem_cgroup *mem) printk(KERN_INFO "[ pid ] uid tgid total_vm rss cpu oom_adj " "name\n"); do_each_thread(g, p) { - /* - * total_vm and rss sizes do not exist for tasks with a - * detached mm so there's no need to report them. - */ - if (!p->mm) - continue; + struct mm_struct *mm; + if (mem && !task_in_mem_cgroup(p, mem)) continue; if (!thread_group_leader(p)) continue; task_lock(p); + mm = p->mm; + if (!mm) { + /* + * total_vm and rss sizes do not exist for tasks with no + * mm so there's no need to report them; they can't be + * oom killed anyway. 
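The mm/mmzone.c hunk above supplies the out-of-line memmap_valid_within() used when CONFIG_ARCH_HAS_HOLES_MEMORYMODEL is set: it confirms that a page's pfn and zone linkages are still intact before a full-memmap walker trusts them. A schematic walker in the style of the mm/vmstat.c user later in this patch (loop bounds and the per-page work are illustrative):

    /* Schematic only: zone and pfn range come from the caller in real code. */
    static void walk_zone_memmap(struct zone *zone,
                                 unsigned long start_pfn, unsigned long end_pfn)
    {
            unsigned long pfn;

            for (pfn = start_pfn; pfn < end_pfn; pfn++) {
                    struct page *page;

                    if (!pfn_valid(pfn))
                            continue;
                    page = pfn_to_page(pfn);

                    /* Skip holes whose backing memmap was freed (e.g. on ARM). */
                    if (!memmap_valid_within(pfn, page, zone))
                            continue;

                    /* ... inspect the page ... */
            }
    }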
+ */ + task_unlock(p); + continue; + } printk(KERN_INFO "[%5d] %5d %5d %8lu %8lu %3d %3d %s\n", - p->pid, __task_cred(p)->uid, p->tgid, - p->mm->total_vm, get_mm_rss(p->mm), (int)task_cpu(p), - p->oomkilladj, p->comm); + p->pid, __task_cred(p)->uid, p->tgid, mm->total_vm, + get_mm_rss(mm), (int)task_cpu(p), p->oomkilladj, + p->comm); task_unlock(p); } while_each_thread(g, p); } diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 30351f0063a..bb553c3e955 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -94,12 +94,12 @@ unsigned long vm_dirty_bytes; /* * The interval between `kupdate'-style writebacks */ -unsigned int dirty_writeback_interval = 5 * 100; /* sentiseconds */ +unsigned int dirty_writeback_interval = 5 * 100; /* centiseconds */ /* * The longest time for which data is allowed to remain dirty */ -unsigned int dirty_expire_interval = 30 * 100; /* sentiseconds */ +unsigned int dirty_expire_interval = 30 * 100; /* centiseconds */ /* * Flag that makes the machine dump writes/reads and block dirtyings. @@ -770,7 +770,7 @@ static void wb_kupdate(unsigned long arg) sync_supers(); - oldest_jif = jiffies - msecs_to_jiffies(dirty_expire_interval); + oldest_jif = jiffies - msecs_to_jiffies(dirty_expire_interval * 10); start_jif = jiffies; next_jif = start_jif + msecs_to_jiffies(dirty_writeback_interval * 10); nr_to_write = global_page_state(NR_FILE_DIRTY) + diff --git a/mm/pdflush.c b/mm/pdflush.c index f2caf96993f..235ac440c44 100644 --- a/mm/pdflush.c +++ b/mm/pdflush.c @@ -57,14 +57,6 @@ static DEFINE_SPINLOCK(pdflush_lock); */ int nr_pdflush_threads = 0; -/* - * The max/min number of pdflush threads. R/W by sysctl at - * /proc/sys/vm/nr_pdflush_threads_max/min - */ -int nr_pdflush_threads_max __read_mostly = MAX_PDFLUSH_THREADS; -int nr_pdflush_threads_min __read_mostly = MIN_PDFLUSH_THREADS; - - /* * The time at which the pdflush thread pool last went empty */ @@ -76,7 +68,7 @@ static unsigned long last_empty_jifs; * Thread pool management algorithm: * * - The minimum and maximum number of pdflush instances are bound - * by nr_pdflush_threads_min and nr_pdflush_threads_max. + * by MIN_PDFLUSH_THREADS and MAX_PDFLUSH_THREADS. * * - If there have been no idle pdflush instances for 1 second, create * a new one. @@ -142,13 +134,14 @@ static int __pdflush(struct pdflush_work *my_work) * To throttle creation, we reset last_empty_jifs. */ if (time_after(jiffies, last_empty_jifs + 1 * HZ)) { - if (list_empty(&pdflush_list) && - nr_pdflush_threads < nr_pdflush_threads_max) { - last_empty_jifs = jiffies; - nr_pdflush_threads++; - spin_unlock_irq(&pdflush_lock); - start_one_pdflush_thread(); - spin_lock_irq(&pdflush_lock); + if (list_empty(&pdflush_list)) { + if (nr_pdflush_threads < MAX_PDFLUSH_THREADS) { + last_empty_jifs = jiffies; + nr_pdflush_threads++; + spin_unlock_irq(&pdflush_lock); + start_one_pdflush_thread(); + spin_lock_irq(&pdflush_lock); + } } } @@ -160,7 +153,7 @@ static int __pdflush(struct pdflush_work *my_work) */ if (list_empty(&pdflush_list)) continue; - if (nr_pdflush_threads <= nr_pdflush_threads_min) + if (nr_pdflush_threads <= MIN_PDFLUSH_THREADS) continue; pdf = list_entry(pdflush_list.prev, struct pdflush_work, list); if (time_after(jiffies, pdf->when_i_went_to_sleep + 1 * HZ)) { @@ -266,9 +259,9 @@ static int __init pdflush_init(void) * Pre-set nr_pdflush_threads... If we fail to create, * the count will be decremented. 
*/ - nr_pdflush_threads = nr_pdflush_threads_min; + nr_pdflush_threads = MIN_PDFLUSH_THREADS; - for (i = 0; i < nr_pdflush_threads_min; i++) + for (i = 0; i < MIN_PDFLUSH_THREADS; i++) start_one_pdflush_thread(); return 0; } diff --git a/mm/rmap.c b/mm/rmap.c index 16521664010..23122af3261 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -14,7 +14,7 @@ * Original design by Rik van Riel 2001 * File methods by Dave McCracken 2003, 2004 * Anonymous methods by Andrea Arcangeli 2004 - * Contributions by Hugh Dickins 2003, 2004 + * Contributions by Hugh Dickins 2003, 2004 */ /* diff --git a/mm/slob.c b/mm/slob.c index a2d4ab32198..f92e66d558b 100644 --- a/mm/slob.c +++ b/mm/slob.c @@ -60,6 +60,7 @@ #include #include #include +#include /* struct reclaim_state */ #include #include #include @@ -255,6 +256,8 @@ static void *slob_new_pages(gfp_t gfp, int order, int node) static void slob_free_pages(void *b, int order) { + if (current->reclaim_state) + current->reclaim_state->reclaimed_slab += 1 << order; free_pages((unsigned long)b, order); } @@ -407,7 +410,7 @@ static void slob_free(void *block, int size) spin_unlock_irqrestore(&slob_lock, flags); clear_slob_page(sp); free_slob_page(sp); - free_page((unsigned long)b); + slob_free_pages(b, 0); return; } diff --git a/mm/slub.c b/mm/slub.c index 7ab54ecbd3f..65ffda5934b 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -9,6 +9,7 @@ */ #include +#include /* struct reclaim_state */ #include #include #include @@ -1170,6 +1171,8 @@ static void __free_slab(struct kmem_cache *s, struct page *page) __ClearPageSlab(page); reset_page_mapcount(page); + if (current->reclaim_state) + current->reclaim_state->reclaimed_slab += pages; __free_pages(page, order); } @@ -1909,7 +1912,7 @@ static inline int calculate_order(int size) * Doh this slab cannot be placed using slub_max_order. 
*/ order = slab_order(size, 1, MAX_ORDER, 1); - if (order <= MAX_ORDER) + if (order < MAX_ORDER) return order; return -ENOSYS; } @@ -2522,6 +2525,7 @@ __setup("slub_min_order=", setup_slub_min_order); static int __init setup_slub_max_order(char *str) { get_option(&str, &slub_max_order); + slub_max_order = min(slub_max_order, MAX_ORDER - 1); return 1; } diff --git a/mm/swap_state.c b/mm/swap_state.c index 3ecea98ecb4..1416e7e9e02 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -109,8 +109,6 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask) */ void __delete_from_swap_cache(struct page *page) { - swp_entry_t ent = {.val = page_private(page)}; - VM_BUG_ON(!PageLocked(page)); VM_BUG_ON(!PageSwapCache(page)); VM_BUG_ON(PageWriteback(page)); @@ -121,7 +119,6 @@ void __delete_from_swap_cache(struct page *page) total_swapcache_pages--; __dec_zone_page_state(page, NR_FILE_PAGES); INC_CACHE_INFO(del_total); - mem_cgroup_uncharge_swapcache(page, ent); } /** @@ -191,6 +188,7 @@ void delete_from_swap_cache(struct page *page) __delete_from_swap_cache(page); spin_unlock_irq(&swapper_space.tree_lock); + mem_cgroup_uncharge_swapcache(page, entry); swap_free(entry); page_cache_release(page); } diff --git a/mm/truncate.c b/mm/truncate.c index 55206fab7b9..12e1579f916 100644 --- a/mm/truncate.c +++ b/mm/truncate.c @@ -359,6 +359,7 @@ invalidate_complete_page2(struct address_space *mapping, struct page *page) BUG_ON(page_has_private(page)); __remove_from_page_cache(page); spin_unlock_irq(&mapping->tree_lock); + mem_cgroup_uncharge_cache_page(page); page_cache_release(page); /* pagecache ref */ return 1; failed: diff --git a/mm/vmscan.c b/mm/vmscan.c index 5fa3eda1f03..d254306562c 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -470,10 +470,12 @@ static int __remove_mapping(struct address_space *mapping, struct page *page) swp_entry_t swap = { .val = page_private(page) }; __delete_from_swap_cache(page); spin_unlock_irq(&mapping->tree_lock); + mem_cgroup_uncharge_swapcache(page, swap); swap_free(swap); } else { __remove_from_page_cache(page); spin_unlock_irq(&mapping->tree_lock); + mem_cgroup_uncharge_cache_page(page); } return 1; diff --git a/mm/vmstat.c b/mm/vmstat.c index 66f6130976c..74d66dba0cb 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -509,22 +509,11 @@ static void pagetypeinfo_showblockcount_print(struct seq_file *m, continue; page = pfn_to_page(pfn); -#ifdef CONFIG_ARCH_FLATMEM_HAS_HOLES - /* - * Ordinarily, memory holes in flatmem still have a valid - * memmap for the PFN range. However, an architecture for - * embedded systems (e.g. ARM) can free up the memmap backing - * holes to save memory on the assumption the memmap is - * never used. The page_zone linkages are then broken even - * though pfn_valid() returns true. Skip the page if the - * linkages are broken. Even if this test passed, the impact - * is that the counters for the movable type are off but - * fragmentation monitoring is likely meaningless on small - * systems. - */ - if (page_zone(page) != zone) + + /* Watch for unexpected holes punched in the memmap */ + if (!memmap_valid_within(pfn, page, zone)) continue; -#endif + mtype = get_pageblock_migratetype(page); if (mtype < MIGRATE_TYPES) diff --git a/net/Kconfig b/net/Kconfig index ce77db4fcec..c19f549c8e7 100644 --- a/net/Kconfig +++ b/net/Kconfig @@ -119,12 +119,6 @@ menuconfig NETFILTER under "iptables" for the location of these packages. 
- Make sure to say N to "Fast switching" below if you intend to say Y - here, as Fast switching currently bypasses netfilter. - - Chances are that you should say Y here if you compile a kernel which - will run as a router and N for regular hosts. If unsure, say N. - if NETFILTER config NETFILTER_DEBUG diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index 61309b26f27..fa47d5d84f5 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c @@ -171,10 +171,8 @@ static void hci_conn_timeout(unsigned long arg) switch (conn->state) { case BT_CONNECT: case BT_CONNECT2: - if (conn->type == ACL_LINK) + if (conn->type == ACL_LINK && conn->out) hci_acl_connect_cancel(conn); - else - hci_acl_disconn(conn, 0x13); break; case BT_CONFIG: case BT_CONNECTED: @@ -292,6 +290,8 @@ int hci_conn_del(struct hci_conn *conn) hci_conn_del_sysfs(conn); + hci_dev_put(hdev); + return 0; } diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 4e7cb88e5da..184ba0a88ec 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -1493,7 +1493,7 @@ static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff hci_dev_lock(hdev); conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); - if (conn) { + if (conn && conn->state == BT_CONNECTED) { hci_conn_hold(conn); conn->disc_timeout = HCI_PAIRING_TIMEOUT; hci_conn_put(conn); diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c index 582d8877078..95f7a7a544b 100644 --- a/net/bluetooth/hci_sysfs.c +++ b/net/bluetooth/hci_sysfs.c @@ -88,14 +88,16 @@ static struct device_type bt_link = { static void add_conn(struct work_struct *work) { struct hci_conn *conn = container_of(work, struct hci_conn, work_add); + struct hci_dev *hdev = conn->hdev; - /* ensure previous del is complete */ - flush_work(&conn->work_del); + dev_set_name(&conn->dev, "%s:%d", hdev->name, conn->handle); if (device_add(&conn->dev) < 0) { BT_ERR("Failed to register connection device"); return; } + + hci_dev_hold(hdev); } /* @@ -113,9 +115,6 @@ static void del_conn(struct work_struct *work) struct hci_conn *conn = container_of(work, struct hci_conn, work_del); struct hci_dev *hdev = conn->hdev; - /* ensure previous add is complete */ - flush_work(&conn->work_add); - if (!device_is_registered(&conn->dev)) return; @@ -131,6 +130,7 @@ static void del_conn(struct work_struct *work) device_del(&conn->dev); put_device(&conn->dev); + hci_dev_put(hdev); } @@ -154,12 +154,8 @@ void hci_conn_init_sysfs(struct hci_conn *conn) void hci_conn_add_sysfs(struct hci_conn *conn) { - struct hci_dev *hdev = conn->hdev; - BT_DBG("conn %p", conn); - dev_set_name(&conn->dev, "%s:%d", hdev->name, conn->handle); - queue_work(bt_workq, &conn->work_add); } diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c index 30b88777c3d..5ee1a3682bf 100644 --- a/net/bridge/br_input.c +++ b/net/bridge/br_input.c @@ -134,6 +134,10 @@ struct sk_buff *br_handle_frame(struct net_bridge_port *p, struct sk_buff *skb) if (skb->protocol == htons(ETH_P_PAUSE)) goto drop; + /* If STP is turned off, then forward */ + if (p->br->stp_enabled == BR_NO_STP && dest[5] == 0) + goto forward; + if (NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_IN, skb, skb->dev, NULL, br_handle_local_finish)) return NULL; /* frame consumed by filter */ @@ -141,6 +145,7 @@ struct sk_buff *br_handle_frame(struct net_bridge_port *p, struct sk_buff *skb) return skb; /* continue processing */ } +forward: switch (p->state) { case BR_STATE_FORWARDING: rhook = rcu_dereference(br_should_route_hook); diff 
--git a/net/bridge/br_stp.c b/net/bridge/br_stp.c index 6e63ec3f1fc..0660515f399 100644 --- a/net/bridge/br_stp.c +++ b/net/bridge/br_stp.c @@ -297,6 +297,9 @@ void br_topology_change_detection(struct net_bridge *br) { int isroot = br_is_root_bridge(br); + if (br->stp_enabled != BR_KERNEL_STP) + return; + pr_info("%s: topology change detected, %s\n", br->dev->name, isroot ? "propagating" : "sending tcn bpdu"); diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c index 9cc9f95b109..6d62d4618cf 100644 --- a/net/core/gen_estimator.c +++ b/net/core/gen_estimator.c @@ -66,9 +66,9 @@ NOTES. - * The stored value for avbps is scaled by 2^5, so that maximal - rate is ~1Gbit, avpps is scaled by 2^10. - + * avbps is scaled by 2^5, avpps is scaled by 2^10. + * both values are reported as 32 bit unsigned values. bps can + overflow for fast links : max speed being 34360Mbit/sec * Minimal interval is HZ/4=250msec (it is the greatest common divisor for HZ=100 and HZ=1024 8)), maximal interval is (HZ*2^EST_MAX_INTERVAL)/4 = 8sec. Shorter intervals @@ -86,9 +86,9 @@ struct gen_estimator spinlock_t *stats_lock; int ewma_log; u64 last_bytes; + u64 avbps; u32 last_packets; u32 avpps; - u32 avbps; struct rcu_head e_rcu; struct rb_node node; }; @@ -115,6 +115,7 @@ static void est_timer(unsigned long arg) rcu_read_lock(); list_for_each_entry_rcu(e, &elist[idx].list, list) { u64 nbytes; + u64 brate; u32 npackets; u32 rate; @@ -125,9 +126,9 @@ static void est_timer(unsigned long arg) nbytes = e->bstats->bytes; npackets = e->bstats->packets; - rate = (nbytes - e->last_bytes)<<(7 - idx); + brate = (nbytes - e->last_bytes)<<(7 - idx); e->last_bytes = nbytes; - e->avbps += ((long)rate - (long)e->avbps) >> e->ewma_log; + e->avbps += ((s64)(brate - e->avbps)) >> e->ewma_log; e->rate_est->bps = (e->avbps+0xF)>>5; rate = (npackets - e->last_packets)<<(12 - idx); diff --git a/net/core/netpoll.c b/net/core/netpoll.c index b5873bdff61..64f51eec657 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c @@ -175,9 +175,13 @@ static void service_arp_queue(struct netpoll_info *npi) void netpoll_poll(struct netpoll *np) { struct net_device *dev = np->dev; - const struct net_device_ops *ops = dev->netdev_ops; + const struct net_device_ops *ops; - if (!dev || !netif_running(dev) || !ops->ndo_poll_controller) + if (!dev || !netif_running(dev)) + return; + + ops = dev->netdev_ops; + if (!ops->ndo_poll_controller) return; /* Process pending work on NIC */ diff --git a/net/core/pktgen.c b/net/core/pktgen.c index 3779c1438c1..0666a827bc6 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c @@ -2447,7 +2447,7 @@ static inline void free_SAs(struct pktgen_dev *pkt_dev) if (pkt_dev->cflows) { /* let go of the SAs if we have them */ int i = 0; - for (; i < pkt_dev->nflows; i++){ + for (; i < pkt_dev->cflows; i++) { struct xfrm_state *x = pkt_dev->flows[i].x; if (x) { xfrm_state_put(x); diff --git a/net/core/skbuff.c b/net/core/skbuff.c index f091a5a845c..e505b5392e1 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -502,7 +502,9 @@ int skb_recycle_check(struct sk_buff *skb, int skb_size) shinfo->gso_segs = 0; shinfo->gso_type = 0; shinfo->ip6_frag_id = 0; + shinfo->tx_flags.flags = 0; shinfo->frag_list = NULL; + memset(&shinfo->hwtstamps, 0, sizeof(shinfo->hwtstamps)); memset(skb, 0, offsetof(struct sk_buff, tail)); skb->data = skb->head + NET_SKB_PAD; @@ -2286,7 +2288,7 @@ unsigned int skb_seq_read(unsigned int consumed, const u8 **data, next_skb: block_limit = skb_headlen(st->cur_skb) + st->stepped_offset; - if 
(abs_offset < block_limit) { + if (abs_offset < block_limit && !st->frag_data) { *data = st->cur_skb->data + (abs_offset - st->stepped_offset); return block_limit - abs_offset; } diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig index b2cf91e4cca..5b919f7b45d 100644 --- a/net/ipv4/Kconfig +++ b/net/ipv4/Kconfig @@ -407,8 +407,8 @@ config INET_XFRM_MODE_BEET If unsure, say Y. config INET_LRO - tristate "Large Receive Offload (ipv4/tcp)" - + bool "Large Receive Offload (ipv4/tcp)" + default y ---help--- Support for Large Receive Offload (ipv4/tcp). diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index ec0ae490f0b..33c7c85dfe4 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c @@ -986,9 +986,12 @@ fib_find_node(struct trie *t, u32 key) static struct node *trie_rebalance(struct trie *t, struct tnode *tn) { int wasfull; - t_key cindex, key = tn->key; + t_key cindex, key; struct tnode *tp; + preempt_disable(); + key = tn->key; + while (tn != NULL && (tp = node_parent((struct node *)tn)) != NULL) { cindex = tkey_extract_bits(key, tp->pos, tp->bits); wasfull = tnode_full(tp, tnode_get_child(tp, cindex)); @@ -1007,6 +1010,7 @@ static struct node *trie_rebalance(struct trie *t, struct tnode *tn) if (IS_TNODE(tn)) tn = (struct tnode *)resize(t, (struct tnode *)tn); + preempt_enable(); return (struct node *)tn; } diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c index 90d22ae0a41..88bf051d0cb 100644 --- a/net/ipv4/ipconfig.c +++ b/net/ipv4/ipconfig.c @@ -139,6 +139,8 @@ __be32 ic_servaddr = NONE; /* Boot server IP address */ __be32 root_server_addr = NONE; /* Address of NFS server */ u8 root_server_path[256] = { 0, }; /* Path to mount as root */ +u32 ic_dev_xid; /* Device under configuration */ + /* vendor class identifier */ static char vendor_class_identifier[253] __initdata; @@ -932,6 +934,13 @@ static int __init ic_bootp_recv(struct sk_buff *skb, struct net_device *dev, str goto drop_unlock; } + /* Is it a reply for the device we are configuring? 
*/ + if (b->xid != ic_dev_xid) { + if (net_ratelimit()) + printk(KERN_ERR "DHCP/BOOTP: Ignoring delayed packet \n"); + goto drop_unlock; + } + /* Parse extensions */ if (ext_len >= 4 && !memcmp(b->exten, ic_bootp_cookie, 4)) { /* Check magic cookie */ @@ -1115,6 +1124,9 @@ static int __init ic_dynamic(void) get_random_bytes(&timeout, sizeof(timeout)); timeout = CONF_BASE_TIMEOUT + (timeout % (unsigned) CONF_TIMEOUT_RANDOM); for (;;) { + /* Track the device we are configuring */ + ic_dev_xid = d->xid; + #ifdef IPCONFIG_BOOTP if (do_bootp && (d->able & IC_BOOTP)) ic_bootp_send_if(d, jiffies - start_jiffies); diff --git a/net/ipv4/route.c b/net/ipv4/route.c index c4c60e9f068..28205e5bfa9 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -784,8 +784,8 @@ static void rt_check_expire(void) { static unsigned int rover; unsigned int i = rover, goal; - struct rtable *rth, **rthp; - unsigned long length = 0, samples = 0; + struct rtable *rth, *aux, **rthp; + unsigned long samples = 0; unsigned long sum = 0, sum2 = 0; u64 mult; @@ -795,9 +795,9 @@ static void rt_check_expire(void) goal = (unsigned int)mult; if (goal > rt_hash_mask) goal = rt_hash_mask + 1; - length = 0; for (; goal > 0; goal--) { unsigned long tmo = ip_rt_gc_timeout; + unsigned long length; i = (i + 1) & rt_hash_mask; rthp = &rt_hash_table[i].chain; @@ -809,8 +809,10 @@ static void rt_check_expire(void) if (*rthp == NULL) continue; + length = 0; spin_lock_bh(rt_hash_lock_addr(i)); while ((rth = *rthp) != NULL) { + prefetch(rth->u.dst.rt_next); if (rt_is_expired(rth)) { *rthp = rth->u.dst.rt_next; rt_free(rth); @@ -819,33 +821,30 @@ static void rt_check_expire(void) if (rth->u.dst.expires) { /* Entry is expired even if it is in use */ if (time_before_eq(jiffies, rth->u.dst.expires)) { +nofree: tmo >>= 1; rthp = &rth->u.dst.rt_next; /* - * Only bump our length if the hash - * inputs on entries n and n+1 are not - * the same, we only count entries on + * We only count entries on * a chain with equal hash inputs once * so that entries for different QOS * levels, and other non-hash input * attributes don't unfairly skew * the length computation */ - if ((*rthp == NULL) || - !compare_hash_inputs(&(*rthp)->fl, - &rth->fl)) - length += ONE; + for (aux = rt_hash_table[i].chain;;) { + if (aux == rth) { + length += ONE; + break; + } + if (compare_hash_inputs(&aux->fl, &rth->fl)) + break; + aux = aux->u.dst.rt_next; + } continue; } - } else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout)) { - tmo >>= 1; - rthp = &rth->u.dst.rt_next; - if ((*rthp == NULL) || - !compare_hash_inputs(&(*rthp)->fl, - &rth->fl)) - length += ONE; - continue; - } + } else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout)) + goto nofree; /* Cleanup aged off entries. */ *rthp = rth->u.dst.rt_next; @@ -1068,7 +1067,6 @@ out: return 0; static int rt_intern_hash(unsigned hash, struct rtable *rt, struct rtable **rp) { struct rtable *rth, **rthp; - struct rtable *rthi; unsigned long now; struct rtable *cand, **candp; u32 min_score; @@ -1088,7 +1086,6 @@ restart: } rthp = &rt_hash_table[hash].chain; - rthi = NULL; spin_lock_bh(rt_hash_lock_addr(hash)); while ((rth = *rthp) != NULL) { @@ -1134,17 +1131,6 @@ restart: chain_length++; rthp = &rth->u.dst.rt_next; - - /* - * check to see if the next entry in the chain - * contains the same hash input values as rt. If it does - * This is where we will insert into the list, instead of - * at the head. 
This groups entries that differ by aspects not - * relvant to the hash function together, which we use to adjust - * our chain length - */ - if (*rthp && compare_hash_inputs(&(*rthp)->fl, &rt->fl)) - rthi = rth; } if (cand) { @@ -1205,10 +1191,7 @@ restart: } } - if (rthi) - rt->u.dst.rt_next = rthi->u.dst.rt_next; - else - rt->u.dst.rt_next = rt_hash_table[hash].chain; + rt->u.dst.rt_next = rt_hash_table[hash].chain; #if RT_CACHE_DEBUG >= 2 if (rt->u.dst.rt_next) { @@ -1224,10 +1207,7 @@ restart: * previous writes to rt are comitted to memory * before making rt visible to other CPUS. */ - if (rthi) - rcu_assign_pointer(rthi->u.dst.rt_next, rt); - else - rcu_assign_pointer(rt_hash_table[hash].chain, rt); + rcu_assign_pointer(rt_hash_table[hash].chain, rt); spin_unlock_bh(rt_hash_lock_addr(hash)); *rp = rt; diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 1d7f49c6f0c..7a0f0b27bf1 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -1321,6 +1321,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, struct task_struct *user_recv = NULL; int copied_early = 0; struct sk_buff *skb; + u32 urg_hole = 0; lock_sock(sk); @@ -1532,7 +1533,8 @@ do_prequeue: } } } - if ((flags & MSG_PEEK) && peek_seq != tp->copied_seq) { + if ((flags & MSG_PEEK) && + (peek_seq - copied - urg_hole != tp->copied_seq)) { if (net_ratelimit()) printk(KERN_DEBUG "TCP(%s:%d): Application bug, race in MSG_PEEK.\n", current->comm, task_pid_nr(current)); @@ -1553,6 +1555,7 @@ do_prequeue: if (!urg_offset) { if (!sock_flag(sk, SOCK_URGINLINE)) { ++*seq; + urg_hole++; offset++; used--; if (!used) diff --git a/net/ipv4/tcp_vegas.c b/net/ipv4/tcp_vegas.c index a453aac91bd..c6743eec9b7 100644 --- a/net/ipv4/tcp_vegas.c +++ b/net/ipv4/tcp_vegas.c @@ -158,6 +158,11 @@ void tcp_vegas_cwnd_event(struct sock *sk, enum tcp_ca_event event) } EXPORT_SYMBOL_GPL(tcp_vegas_cwnd_event); +static inline u32 tcp_vegas_ssthresh(struct tcp_sock *tp) +{ + return min(tp->snd_ssthresh, tp->snd_cwnd-1); +} + static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 in_flight) { struct tcp_sock *tp = tcp_sk(sk); @@ -221,11 +226,10 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 in_flight) */ diff = tp->snd_cwnd * (rtt-vegas->baseRTT) / vegas->baseRTT; - if (diff > gamma && tp->snd_ssthresh > 2 ) { + if (diff > gamma && tp->snd_cwnd <= tp->snd_ssthresh) { /* Going too fast. Time to slow down * and switch to congestion avoidance. */ - tp->snd_ssthresh = 2; /* Set cwnd to match the actual rate * exactly: @@ -235,6 +239,7 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 in_flight) * utilization. */ tp->snd_cwnd = min(tp->snd_cwnd, (u32)target_cwnd+1); + tp->snd_ssthresh = tcp_vegas_ssthresh(tp); } else if (tp->snd_cwnd <= tp->snd_ssthresh) { /* Slow start. */ @@ -250,6 +255,8 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 in_flight) * we slow down. */ tp->snd_cwnd--; + tp->snd_ssthresh + = tcp_vegas_ssthresh(tp); } else if (diff < alpha) { /* We don't have enough extra packets * in the network, so speed up. diff --git a/net/ipv6/netfilter/ip6t_ipv6header.c b/net/ipv6/netfilter/ip6t_ipv6header.c index 14e6724d567..91490ad9302 100644 --- a/net/ipv6/netfilter/ip6t_ipv6header.c +++ b/net/ipv6/netfilter/ip6t_ipv6header.c @@ -50,14 +50,14 @@ ipv6header_mt6(const struct sk_buff *skb, const struct xt_match_param *par) struct ipv6_opt_hdr _hdr; int hdrlen; - /* Is there enough space for the next ext header? 
*/ - if (len < (int)sizeof(struct ipv6_opt_hdr)) - return false; /* No more exthdr -> evaluate */ if (nexthdr == NEXTHDR_NONE) { temp |= MASK_NONE; break; } + /* Is there enough space for the next ext header? */ + if (len < (int)sizeof(struct ipv6_opt_hdr)) + return false; /* ESP -> evaluate */ if (nexthdr == NEXTHDR_ESP) { temp |= MASK_ESP; diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 1394ddb6e35..032a5ec391c 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -137,6 +137,7 @@ static struct rt6_info ip6_null_entry_template = { } }, .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP), + .rt6i_protocol = RTPROT_KERNEL, .rt6i_metric = ~(u32) 0, .rt6i_ref = ATOMIC_INIT(1), }; @@ -159,6 +160,7 @@ static struct rt6_info ip6_prohibit_entry_template = { } }, .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP), + .rt6i_protocol = RTPROT_KERNEL, .rt6i_metric = ~(u32) 0, .rt6i_ref = ATOMIC_INIT(1), }; @@ -176,6 +178,7 @@ static struct rt6_info ip6_blk_hole_entry_template = { } }, .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP), + .rt6i_protocol = RTPROT_KERNEL, .rt6i_metric = ~(u32) 0, .rt6i_ref = ATOMIC_INIT(1), }; diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c index 3824990d340..d9233ec5061 100644 --- a/net/mac80211/rc80211_minstrel.c +++ b/net/mac80211/rc80211_minstrel.c @@ -476,8 +476,8 @@ minstrel_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp) return NULL; for (i = 0; i < IEEE80211_NUM_BANDS; i++) { - sband = hw->wiphy->bands[hw->conf.channel->band]; - if (sband->n_bitrates > max_rates) + sband = hw->wiphy->bands[i]; + if (sband && sband->n_bitrates > max_rates) max_rates = sband->n_bitrates; } diff --git a/net/mac80211/rc80211_pid_algo.c b/net/mac80211/rc80211_pid_algo.c index b16801cde06..8bef9a1262f 100644 --- a/net/mac80211/rc80211_pid_algo.c +++ b/net/mac80211/rc80211_pid_algo.c @@ -317,57 +317,17 @@ rate_control_pid_rate_init(void *priv, struct ieee80211_supported_band *sband, struct ieee80211_sta *sta, void *priv_sta) { struct rc_pid_sta_info *spinfo = priv_sta; + struct rc_pid_info *pinfo = priv; + struct rc_pid_rateinfo *rinfo = pinfo->rinfo; struct sta_info *si; + int i, j, tmp; + bool s; /* TODO: This routine should consider using RSSI from previous packets * as we need to have IEEE 802.1X auth succeed immediately after assoc.. * Until that method is implemented, we will use the lowest supported * rate as a workaround. */ - spinfo->txrate_idx = rate_lowest_index(sband, sta); - /* HACK */ - si = container_of(sta, struct sta_info, sta); - si->fail_avg = 0; -} - -static void *rate_control_pid_alloc(struct ieee80211_hw *hw, - struct dentry *debugfsdir) -{ - struct rc_pid_info *pinfo; - struct rc_pid_rateinfo *rinfo; - struct ieee80211_supported_band *sband; - int i, j, tmp; - bool s; -#ifdef CONFIG_MAC80211_DEBUGFS - struct rc_pid_debugfs_entries *de; -#endif - - sband = hw->wiphy->bands[hw->conf.channel->band]; - - pinfo = kmalloc(sizeof(*pinfo), GFP_ATOMIC); - if (!pinfo) - return NULL; - - /* We can safely assume that sband won't change unless we get - * reinitialized. 
*/ - rinfo = kmalloc(sizeof(*rinfo) * sband->n_bitrates, GFP_ATOMIC); - if (!rinfo) { - kfree(pinfo); - return NULL; - } - - pinfo->target = RC_PID_TARGET_PF; - pinfo->sampling_period = RC_PID_INTERVAL; - pinfo->coeff_p = RC_PID_COEFF_P; - pinfo->coeff_i = RC_PID_COEFF_I; - pinfo->coeff_d = RC_PID_COEFF_D; - pinfo->smoothing_shift = RC_PID_SMOOTHING_SHIFT; - pinfo->sharpen_factor = RC_PID_SHARPENING_FACTOR; - pinfo->sharpen_duration = RC_PID_SHARPENING_DURATION; - pinfo->norm_offset = RC_PID_NORM_OFFSET; - pinfo->rinfo = rinfo; - pinfo->oldrate = 0; - /* Sort the rates. This is optimized for the most common case (i.e. * almost-sorted CCK+OFDM rates). Kind of bubble-sort with reversed * mapping too. */ @@ -395,6 +355,51 @@ static void *rate_control_pid_alloc(struct ieee80211_hw *hw, break; } + spinfo->txrate_idx = rate_lowest_index(sband, sta); + /* HACK */ + si = container_of(sta, struct sta_info, sta); + si->fail_avg = 0; +} + +static void *rate_control_pid_alloc(struct ieee80211_hw *hw, + struct dentry *debugfsdir) +{ + struct rc_pid_info *pinfo; + struct rc_pid_rateinfo *rinfo; + struct ieee80211_supported_band *sband; + int i, max_rates = 0; +#ifdef CONFIG_MAC80211_DEBUGFS + struct rc_pid_debugfs_entries *de; +#endif + + pinfo = kmalloc(sizeof(*pinfo), GFP_ATOMIC); + if (!pinfo) + return NULL; + + for (i = 0; i < IEEE80211_NUM_BANDS; i++) { + sband = hw->wiphy->bands[i]; + if (sband && sband->n_bitrates > max_rates) + max_rates = sband->n_bitrates; + } + + rinfo = kmalloc(sizeof(*rinfo) * max_rates, GFP_ATOMIC); + if (!rinfo) { + kfree(pinfo); + return NULL; + } + + pinfo->target = RC_PID_TARGET_PF; + pinfo->sampling_period = RC_PID_INTERVAL; + pinfo->coeff_p = RC_PID_COEFF_P; + pinfo->coeff_i = RC_PID_COEFF_I; + pinfo->coeff_d = RC_PID_COEFF_D; + pinfo->smoothing_shift = RC_PID_SMOOTHING_SHIFT; + pinfo->sharpen_factor = RC_PID_SHARPENING_FACTOR; + pinfo->sharpen_duration = RC_PID_SHARPENING_DURATION; + pinfo->norm_offset = RC_PID_NORM_OFFSET; + pinfo->rinfo = rinfo; + pinfo->oldrate = 0; + #ifdef CONFIG_MAC80211_DEBUGFS de = &pinfo->dentries; de->target = debugfs_create_u32("target_pf", S_IRUSR | S_IWUSR, diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 3fb04a86444..63656266d56 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -772,7 +772,7 @@ ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx) hdrlen = ieee80211_hdrlen(hdr->frame_control); /* internal error, why is TX_FRAGMENTED set? */ - if (WARN_ON(skb->len <= frag_threshold)) + if (WARN_ON(skb->len + FCS_LEN <= frag_threshold)) return TX_DROP; /* diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c index 60aba45023f..77bfdfeb966 100644 --- a/net/netfilter/ipvs/ip_vs_conn.c +++ b/net/netfilter/ipvs/ip_vs_conn.c @@ -260,7 +260,10 @@ struct ip_vs_conn *ip_vs_ct_in_get list_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) { if (cp->af == af && ip_vs_addr_equal(af, s_addr, &cp->caddr) && - ip_vs_addr_equal(af, d_addr, &cp->vaddr) && + /* protocol should only be IPPROTO_IP if + * d_addr is a fwmark */ + ip_vs_addr_equal(protocol == IPPROTO_IP ? 
AF_UNSPEC : af, + d_addr, &cp->vaddr) && s_port == cp->cport && d_port == cp->vport && cp->flags & IP_VS_CONN_F_TEMPLATE && protocol == cp->protocol) { @@ -698,7 +701,9 @@ ip_vs_conn_new(int af, int proto, const union nf_inet_addr *caddr, __be16 cport, cp->cport = cport; ip_vs_addr_copy(af, &cp->vaddr, vaddr); cp->vport = vport; - ip_vs_addr_copy(af, &cp->daddr, daddr); + /* proto should only be IPPROTO_IP if d_addr is a fwmark */ + ip_vs_addr_copy(proto == IPPROTO_IP ? AF_UNSPEC : af, + &cp->daddr, daddr); cp->dport = dport; cp->flags = flags; spin_lock_init(&cp->lock); diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c index cb3e031335e..8dddb17a947 100644 --- a/net/netfilter/ipvs/ip_vs_core.c +++ b/net/netfilter/ipvs/ip_vs_core.c @@ -278,7 +278,7 @@ ip_vs_sched_persist(struct ip_vs_service *svc, */ if (svc->fwmark) { union nf_inet_addr fwmark = { - .all = { 0, 0, 0, htonl(svc->fwmark) } + .ip = htonl(svc->fwmark) }; ct = ip_vs_ct_in_get(svc->af, IPPROTO_IP, &snet, 0, @@ -306,7 +306,7 @@ ip_vs_sched_persist(struct ip_vs_service *svc, */ if (svc->fwmark) { union nf_inet_addr fwmark = { - .all = { 0, 0, 0, htonl(svc->fwmark) } + .ip = htonl(svc->fwmark) }; ct = ip_vs_conn_new(svc->af, IPPROTO_IP, diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index f13fc57e1ec..c523f0b8cee 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -1186,28 +1186,6 @@ ctnetlink_change_conntrack(struct nf_conn *ct, struct nlattr *cda[]) return 0; } -static inline void -ctnetlink_event_report(struct nf_conn *ct, u32 pid, int report) -{ - unsigned int events = 0; - - if (test_bit(IPS_EXPECTED_BIT, &ct->status)) - events |= IPCT_RELATED; - else - events |= IPCT_NEW; - - nf_conntrack_event_report(IPCT_STATUS | - IPCT_HELPER | - IPCT_REFRESH | - IPCT_PROTOINFO | - IPCT_NATSEQADJ | - IPCT_MARK | - events, - ct, - pid, - report); -} - static struct nf_conn * ctnetlink_create_conntrack(struct nlattr *cda[], struct nf_conntrack_tuple *otuple, @@ -1373,6 +1351,7 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb, err = -ENOENT; if (nlh->nlmsg_flags & NLM_F_CREATE) { struct nf_conn *ct; + enum ip_conntrack_events events; ct = ctnetlink_create_conntrack(cda, &otuple, &rtuple, u3); @@ -1383,9 +1362,18 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb, err = 0; nf_conntrack_get(&ct->ct_general); spin_unlock_bh(&nf_conntrack_lock); - ctnetlink_event_report(ct, - NETLINK_CB(skb).pid, - nlmsg_report(nlh)); + if (test_bit(IPS_EXPECTED_BIT, &ct->status)) + events = IPCT_RELATED; + else + events = IPCT_NEW; + + nf_conntrack_event_report(IPCT_STATUS | + IPCT_HELPER | + IPCT_PROTOINFO | + IPCT_NATSEQADJ | + IPCT_MARK | events, + ct, NETLINK_CB(skb).pid, + nlmsg_report(nlh)); nf_ct_put(ct); } else spin_unlock_bh(&nf_conntrack_lock); @@ -1404,9 +1392,13 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb, if (err == 0) { nf_conntrack_get(&ct->ct_general); spin_unlock_bh(&nf_conntrack_lock); - ctnetlink_event_report(ct, - NETLINK_CB(skb).pid, - nlmsg_report(nlh)); + nf_conntrack_event_report(IPCT_STATUS | + IPCT_HELPER | + IPCT_PROTOINFO | + IPCT_NATSEQADJ | + IPCT_MARK, + ct, NETLINK_CB(skb).pid, + nlmsg_report(nlh)); nf_ct_put(ct); } else spin_unlock_bh(&nf_conntrack_lock); diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c index 8e757dd5339..aee0d6bea30 100644 --- a/net/netfilter/nf_conntrack_proto_dccp.c +++ 
b/net/netfilter/nf_conntrack_proto_dccp.c @@ -22,6 +22,7 @@ #include #include #include +#include #include static DEFINE_RWLOCK(dccp_lock); @@ -553,6 +554,9 @@ static int dccp_packet(struct nf_conn *ct, const struct sk_buff *skb, ct->proto.dccp.state = new_state; write_unlock_bh(&dccp_lock); + if (new_state != old_state) + nf_conntrack_event_cache(IPCT_PROTOINFO, ct); + dn = dccp_pernet(net); nf_ct_refresh_acct(ct, ctinfo, skb, dn->dccp_timeout[new_state]); diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c index b5ccf2b4b2e..97a6e93d742 100644 --- a/net/netfilter/nf_conntrack_proto_tcp.c +++ b/net/netfilter/nf_conntrack_proto_tcp.c @@ -634,6 +634,14 @@ static bool tcp_in_window(const struct nf_conn *ct, sender->td_end = end; sender->flags |= IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED; } + if (tcph->ack) { + if (!(sender->flags & IP_CT_TCP_FLAG_MAXACK_SET)) { + sender->td_maxack = ack; + sender->flags |= IP_CT_TCP_FLAG_MAXACK_SET; + } else if (after(ack, sender->td_maxack)) + sender->td_maxack = ack; + } + /* * Update receiver data. */ @@ -918,6 +926,16 @@ static int tcp_packet(struct nf_conn *ct, "nf_ct_tcp: invalid state "); return -NF_ACCEPT; case TCP_CONNTRACK_CLOSE: + if (index == TCP_RST_SET + && (ct->proto.tcp.seen[!dir].flags & IP_CT_TCP_FLAG_MAXACK_SET) + && before(ntohl(th->seq), ct->proto.tcp.seen[!dir].td_maxack)) { + /* Invalid RST */ + write_unlock_bh(&tcp_lock); + if (LOG_INVALID(net, IPPROTO_TCP)) + nf_log_packet(pf, 0, skb, NULL, NULL, NULL, + "nf_ct_tcp: invalid RST "); + return -NF_ACCEPT; + } if (index == TCP_RST_SET && ((test_bit(IPS_SEEN_REPLY_BIT, &ct->status) && ct->proto.tcp.last_index == TCP_SYN_SET) diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c index fd326ac27ec..66a6dd5c519 100644 --- a/net/netfilter/nfnetlink_log.c +++ b/net/netfilter/nfnetlink_log.c @@ -581,6 +581,12 @@ nfulnl_log_packet(u_int8_t pf, + nla_total_size(sizeof(struct nfulnl_msg_packet_hw)) + nla_total_size(sizeof(struct nfulnl_msg_packet_timestamp)); + if (in && skb_mac_header_was_set(skb)) { + size += nla_total_size(skb->dev->hard_header_len) + + nla_total_size(sizeof(u_int16_t)) /* hwtype */ + + nla_total_size(sizeof(u_int16_t)); /* hwlen */ + } + spin_lock_bh(&inst->lock); if (inst->flags & NFULNL_CFG_F_SEQ) diff --git a/net/netfilter/xt_cluster.c b/net/netfilter/xt_cluster.c index 6c4847662b8..69a639f3540 100644 --- a/net/netfilter/xt_cluster.c +++ b/net/netfilter/xt_cluster.c @@ -135,7 +135,13 @@ static bool xt_cluster_mt_checkentry(const struct xt_mtchk_param *par) { struct xt_cluster_match_info *info = par->matchinfo; - if (info->node_mask >= (1 << info->total_nodes)) { + if (info->total_nodes > XT_CLUSTER_NODES_MAX) { + printk(KERN_ERR "xt_cluster: you have exceeded the maximum " + "number of cluster nodes (%u > %u)\n", + info->total_nodes, XT_CLUSTER_NODES_MAX); + return false; + } + if (info->node_mask >= (1ULL << info->total_nodes)) { printk(KERN_ERR "xt_cluster: this node mask cannot be " "higher than the total number of nodes\n"); return false; diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c index a5b5369c30f..219dcdbe388 100644 --- a/net/netfilter/xt_hashlimit.c +++ b/net/netfilter/xt_hashlimit.c @@ -926,7 +926,7 @@ static int dl_seq_show(struct seq_file *s, void *v) if (!hlist_empty(&htable->hash[*bucket])) { hlist_for_each_entry(ent, pos, &htable->hash[*bucket], node) if (dl_seq_real_show(ent, htable->family, s)) - return 1; + return -1; } return 0; } diff --git a/net/rxrpc/ar-connection.c 
b/net/rxrpc/ar-connection.c index 0f1218b8d28..67e38a05624 100644 --- a/net/rxrpc/ar-connection.c +++ b/net/rxrpc/ar-connection.c @@ -343,9 +343,9 @@ static int rxrpc_connect_exclusive(struct rxrpc_sock *rx, /* not yet present - create a candidate for a new connection * and then redo the check */ conn = rxrpc_alloc_connection(gfp); - if (IS_ERR(conn)) { - _leave(" = %ld", PTR_ERR(conn)); - return PTR_ERR(conn); + if (!conn) { + _leave(" = -ENOMEM"); + return -ENOMEM; } conn->trans = trans; @@ -508,9 +508,9 @@ int rxrpc_connect_call(struct rxrpc_sock *rx, /* not yet present - create a candidate for a new connection and then * redo the check */ candidate = rxrpc_alloc_connection(gfp); - if (IS_ERR(candidate)) { - _leave(" = %ld", PTR_ERR(candidate)); - return PTR_ERR(candidate); + if (!candidate) { + _leave(" = -ENOMEM"); + return -ENOMEM; } candidate->trans = trans; diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 0759f32e9dc..09cdcdfe7e9 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -135,6 +135,7 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n, void *arg) unsigned long cl; unsigned long fh; int err; + int tp_created = 0; if (net != &init_net) return -EINVAL; @@ -266,10 +267,7 @@ replay: goto errout; } - spin_lock_bh(root_lock); - tp->next = *back; - *back = tp; - spin_unlock_bh(root_lock); + tp_created = 1; } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) goto errout; @@ -296,8 +294,11 @@ replay: switch (n->nlmsg_type) { case RTM_NEWTFILTER: err = -EEXIST; - if (n->nlmsg_flags & NLM_F_EXCL) + if (n->nlmsg_flags & NLM_F_EXCL) { + if (tp_created) + tcf_destroy(tp); goto errout; + } break; case RTM_DELTFILTER: err = tp->ops->delete(tp, fh); @@ -314,8 +315,18 @@ replay: } err = tp->ops->change(tp, cl, t->tcm_handle, tca, &fh); - if (err == 0) + if (err == 0) { + if (tp_created) { + spin_lock_bh(root_lock); + tp->next = *back; + *back = tp; + spin_unlock_bh(root_lock); + } tfilter_notify(skb, n, tp, fh, RTM_NEWTFILTER); + } else { + if (tp_created) + tcf_destroy(tp); + } errout: if (cl) diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c index 91a3db4a76f..e5becb92b3e 100644 --- a/net/sched/cls_cgroup.c +++ b/net/sched/cls_cgroup.c @@ -104,8 +104,7 @@ static int cls_cgroup_classify(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_result *res) { struct cls_cgroup_head *head = tp->root; - struct cgroup_cls_state *cs; - int ret = 0; + u32 classid; /* * Due to the nature of the classifier it is required to ignore all @@ -121,17 +120,18 @@ static int cls_cgroup_classify(struct sk_buff *skb, struct tcf_proto *tp, return -1; rcu_read_lock(); - cs = task_cls_state(current); - if (cs->classid && tcf_em_tree_match(skb, &head->ematches, NULL)) { - res->classid = cs->classid; - res->class = 0; - ret = tcf_exts_exec(skb, &head->exts, res); - } else - ret = -1; - + classid = task_cls_state(current)->classid; rcu_read_unlock(); - return ret; + if (!classid) + return -1; + + if (!tcf_em_tree_match(skb, &head->ematches, NULL)) + return -1; + + res->classid = classid; + res->class = 0; + return tcf_exts_exec(skb, &head->exts, res); } static unsigned long cls_cgroup_get(struct tcf_proto *tp, u32 handle) @@ -167,6 +167,9 @@ static int cls_cgroup_change(struct tcf_proto *tp, unsigned long base, struct tcf_exts e; int err; + if (!tca[TCA_OPTIONS]) + return -EINVAL; + if (head == NULL) { if (!handle) return -EINVAL; diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c index 92cfc9d7e3b..69188e8358b 100644 --- 
a/net/sched/sch_fifo.c +++ b/net/sched/sch_fifo.c @@ -51,7 +51,7 @@ static int fifo_init(struct Qdisc *sch, struct nlattr *opt) u32 limit = qdisc_dev(sch)->tx_queue_len ? : 1; if (sch->ops == &bfifo_qdisc_ops) - limit *= qdisc_dev(sch)->mtu; + limit *= psched_mtu(qdisc_dev(sch)); q->limit = limit; } else { diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c index ec697cebb63..3b641829723 100644 --- a/net/sched/sch_teql.c +++ b/net/sched/sch_teql.c @@ -303,6 +303,8 @@ restart: switch (teql_resolve(skb, skb_res, slave)) { case 0: if (__netif_tx_trylock(slave_txq)) { + unsigned int length = qdisc_pkt_len(skb); + if (!netif_tx_queue_stopped(slave_txq) && !netif_tx_queue_frozen(slave_txq) && slave_ops->ndo_start_xmit(skb, slave) == 0) { @@ -310,8 +312,7 @@ restart: master->slaves = NEXT_SLAVE(q); netif_wake_queue(dev); master->stats.tx_packets++; - master->stats.tx_bytes += - qdisc_pkt_len(skb); + master->stats.tx_bytes += length; return 0; } __netif_tx_unlock(slave_txq); diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index af3198814c1..9d504234af4 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c @@ -345,6 +345,7 @@ static void svc_sock_setbufsize(struct socket *sock, unsigned int snd, lock_sock(sock->sk); sock->sk->sk_sndbuf = snd * 2; sock->sk->sk_rcvbuf = rcv * 2; + sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK|SOCK_RCVBUF_LOCK; release_sock(sock->sk); #endif } @@ -796,6 +797,23 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp) test_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags), test_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags)); + if (test_and_clear_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags)) + /* sndbuf needs to have room for one request + * per thread, otherwise we can stall even when the + * network isn't a bottleneck. + * + * We count all threads rather than threads in a + * particular pool, which provides an upper bound + * on the number of threads which will access the socket. + * + * rcvbuf just needs to be able to hold a few requests. + * Normally they will be removed from the queue + * as soon as a complete request arrives. + */ + svc_sock_setbufsize(svsk->sk_sock, + (serv->sv_nrthreads+3) * serv->sv_max_mesg, + 3 * serv->sv_max_mesg); + clear_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); /* Receive data. If we haven't got the record length yet, get @@ -1043,6 +1061,15 @@ static void svc_tcp_init(struct svc_sock *svsk, struct svc_serv *serv) tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF; + /* initialise setting must have enough space to + * receive and respond to one request. + * svc_tcp_recvfrom will re-adjust if necessary + */ + svc_sock_setbufsize(svsk->sk_sock, + 3 * svsk->sk_xprt.xpt_server->sv_max_mesg, + 3 * svsk->sk_xprt.xpt_server->sv_max_mesg); + + set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags); set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); if (sk->sk_state != TCP_ESTABLISHED) set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags); @@ -1112,14 +1139,8 @@ static struct svc_sock *svc_setup_socket(struct svc_serv *serv, /* Initialize the socket */ if (sock->type == SOCK_DGRAM) svc_udp_init(svsk, serv); - else { - /* initialise setting must have enough space to - * receive and respond to one request. 
- */ - svc_sock_setbufsize(svsk->sk_sock, 4 * serv->sv_max_mesg, - 4 * serv->sv_max_mesg); + else svc_tcp_init(svsk, serv); - } dprintk("svc: svc_setup_socket created %p (inet %p)\n", svsk, svsk->sk_sk); diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c index 629a28764da..42a6f9f2028 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c @@ -265,7 +265,7 @@ static int fast_reg_read_chunks(struct svcxprt_rdma *xprt, frmr->page_list->page_list[page_no] = ib_dma_map_single(xprt->sc_cm_id->device, page_address(rqstp->rq_arg.pages[page_no]), - PAGE_SIZE, DMA_TO_DEVICE); + PAGE_SIZE, DMA_FROM_DEVICE); if (ib_dma_mapping_error(xprt->sc_cm_id->device, frmr->page_list->page_list[page_no])) goto fatal_err; diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c index 6c26a675435..f11be72a1a8 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c @@ -128,7 +128,8 @@ static int fast_reg_xdr(struct svcxprt_rdma *xprt, page_bytes -= sge_bytes; frmr->page_list->page_list[page_no] = - ib_dma_map_page(xprt->sc_cm_id->device, page, 0, + ib_dma_map_single(xprt->sc_cm_id->device, + page_address(page), PAGE_SIZE, DMA_TO_DEVICE); if (ib_dma_mapping_error(xprt->sc_cm_id->device, frmr->page_list->page_list[page_no])) @@ -183,6 +184,7 @@ static int fast_reg_xdr(struct svcxprt_rdma *xprt, fatal_err: printk("svcrdma: Error fast registering memory for xprt %p\n", xprt); + vec->frmr = NULL; svc_rdma_put_frmr(xprt, frmr); return -EIO; } @@ -516,6 +518,7 @@ static int send_reply(struct svcxprt_rdma *rdma, "svcrdma: could not post a receive buffer, err=%d." "Closing transport %p.\n", ret, rdma); set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags); + svc_rdma_put_frmr(rdma, vec->frmr); svc_rdma_put_context(ctxt, 0); return -ENOTCONN; } @@ -530,18 +533,17 @@ static int send_reply(struct svcxprt_rdma *rdma, clear_bit(RDMACTXT_F_FAST_UNREG, &ctxt->flags); /* Prepare the SGE for the RPCRDMA Header */ + ctxt->sge[0].lkey = rdma->sc_dma_lkey; + ctxt->sge[0].length = svc_rdma_xdr_get_reply_hdr_len(rdma_resp); ctxt->sge[0].addr = - ib_dma_map_page(rdma->sc_cm_id->device, - page, 0, PAGE_SIZE, DMA_TO_DEVICE); + ib_dma_map_single(rdma->sc_cm_id->device, page_address(page), + ctxt->sge[0].length, DMA_TO_DEVICE); if (ib_dma_mapping_error(rdma->sc_cm_id->device, ctxt->sge[0].addr)) goto err; atomic_inc(&rdma->sc_dma_used); ctxt->direction = DMA_TO_DEVICE; - ctxt->sge[0].length = svc_rdma_xdr_get_reply_hdr_len(rdma_resp); - ctxt->sge[0].lkey = rdma->sc_dma_lkey; - /* Determine how many of our SGE are to be transmitted */ for (sge_no = 1; byte_count && sge_no < vec->count; sge_no++) { sge_bytes = min_t(size_t, vec->sge[sge_no].iov_len, byte_count); @@ -606,6 +608,7 @@ static int send_reply(struct svcxprt_rdma *rdma, return 0; err: + svc_rdma_unmap_dma(ctxt); svc_rdma_put_frmr(rdma, vec->frmr); svc_rdma_put_context(ctxt, 1); return -EIO; diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c index 3d810e7df3f..5151f9f6c57 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c @@ -500,8 +500,8 @@ int svc_rdma_post_recv(struct svcxprt_rdma *xprt) BUG_ON(sge_no >= xprt->sc_max_sge); page = svc_rdma_get_page(); ctxt->pages[sge_no] = page; - pa = ib_dma_map_page(xprt->sc_cm_id->device, - page, 0, PAGE_SIZE, + pa = ib_dma_map_single(xprt->sc_cm_id->device, + page_address(page), PAGE_SIZE, 
DMA_FROM_DEVICE); if (ib_dma_mapping_error(xprt->sc_cm_id->device, pa)) goto err_put_ctxt; @@ -520,8 +520,9 @@ int svc_rdma_post_recv(struct svcxprt_rdma *xprt) svc_xprt_get(&xprt->sc_xprt); ret = ib_post_recv(xprt->sc_qp, &recv_wr, &bad_recv_wr); if (ret) { - svc_xprt_put(&xprt->sc_xprt); + svc_rdma_unmap_dma(ctxt); svc_rdma_put_context(ctxt, 1); + svc_xprt_put(&xprt->sc_xprt); } return ret; @@ -1314,8 +1315,8 @@ void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp, length = svc_rdma_xdr_encode_error(xprt, rmsgp, err, va); /* Prepare SGE for local address */ - sge.addr = ib_dma_map_page(xprt->sc_cm_id->device, - p, 0, PAGE_SIZE, DMA_FROM_DEVICE); + sge.addr = ib_dma_map_single(xprt->sc_cm_id->device, + page_address(p), PAGE_SIZE, DMA_FROM_DEVICE); if (ib_dma_mapping_error(xprt->sc_cm_id->device, sge.addr)) { put_page(p); return; @@ -1342,7 +1343,7 @@ void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp, if (ret) { dprintk("svcrdma: Error %d posting send for protocol error\n", ret); - ib_dma_unmap_page(xprt->sc_cm_id->device, + ib_dma_unmap_single(xprt->sc_cm_id->device, sge.addr, PAGE_SIZE, DMA_FROM_DEVICE); svc_rdma_put_context(ctxt, 1); diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c index 3b21e0cc5e6..465aafc2007 100644 --- a/net/sunrpc/xprtrdma/verbs.c +++ b/net/sunrpc/xprtrdma/verbs.c @@ -1495,7 +1495,8 @@ rpcrdma_register_frmr_external(struct rpcrdma_mr_seg *seg, frmr_wr.wr.fast_reg.page_shift = PAGE_SHIFT; frmr_wr.wr.fast_reg.length = i << PAGE_SHIFT; frmr_wr.wr.fast_reg.access_flags = (writing ? - IB_ACCESS_REMOTE_WRITE : IB_ACCESS_REMOTE_READ); + IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE : + IB_ACCESS_REMOTE_READ); frmr_wr.wr.fast_reg.rkey = seg1->mr_chunk.rl_mw->r.frmr.fr_mr->rkey; DECR_CQCOUNT(&r_xprt->rx_ep); diff --git a/net/wimax/op-msg.c b/net/wimax/op-msg.c index 5d149c1b5f0..9ad4d893a56 100644 --- a/net/wimax/op-msg.c +++ b/net/wimax/op-msg.c @@ -149,7 +149,8 @@ struct sk_buff *wimax_msg_alloc(struct wimax_dev *wimax_dev, } result = nla_put(skb, WIMAX_GNL_MSG_DATA, size, msg); if (result < 0) { - dev_err(dev, "no memory to add payload in attribute\n"); + dev_err(dev, "no memory to add payload (msg %p size %zu) in " + "attribute: %d\n", msg, size, result); goto error_nla_put; } genlmsg_end(skb, genl_msg); @@ -299,10 +300,10 @@ int wimax_msg(struct wimax_dev *wimax_dev, const char *pipe_name, struct sk_buff *skb; skb = wimax_msg_alloc(wimax_dev, pipe_name, buf, size, gfp_flags); - if (skb == NULL) - goto error_msg_new; - result = wimax_msg_send(wimax_dev, skb); -error_msg_new: + if (IS_ERR(skb)) + result = PTR_ERR(skb); + else + result = wimax_msg_send(wimax_dev, skb); return result; } EXPORT_SYMBOL_GPL(wimax_msg); diff --git a/net/wimax/stack.c b/net/wimax/stack.c index a0ee76b5251..933e1422b09 100644 --- a/net/wimax/stack.c +++ b/net/wimax/stack.c @@ -338,8 +338,21 @@ out: */ void wimax_state_change(struct wimax_dev *wimax_dev, enum wimax_st new_state) { + /* + * A driver cannot take the wimax_dev out of the + * __WIMAX_ST_NULL state unless by calling wimax_dev_add(). If + * the wimax_dev's state is still NULL, we ignore any request + * to change its state because it means it hasn't been yet + * registered. + * + * There is no need to complain about it, as routines that + * call this might be shared from different code paths that + * are called before or after wimax_dev_add() has done its + * job. 
+ */ mutex_lock(&wimax_dev->mutex); - __wimax_state_change(wimax_dev, new_state); + if (wimax_dev->state > __WIMAX_ST_NULL) + __wimax_state_change(wimax_dev, new_state); mutex_unlock(&wimax_dev->mutex); return; } @@ -376,7 +389,7 @@ EXPORT_SYMBOL_GPL(wimax_state_get); void wimax_dev_init(struct wimax_dev *wimax_dev) { INIT_LIST_HEAD(&wimax_dev->id_table_node); - __wimax_state_set(wimax_dev, WIMAX_ST_UNINITIALIZED); + __wimax_state_set(wimax_dev, __WIMAX_ST_NULL); mutex_init(&wimax_dev->mutex); mutex_init(&wimax_dev->mutex_reset); } diff --git a/net/wireless/reg.c b/net/wireless/reg.c index 6c1993d9990..487cb627ddb 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c @@ -907,6 +907,7 @@ EXPORT_SYMBOL(freq_reg_info); int freq_reg_info(struct wiphy *wiphy, u32 center_freq, u32 *bandwidth, const struct ieee80211_reg_rule **reg_rule) { + assert_cfg80211_lock(); return freq_reg_info_regd(wiphy, center_freq, bandwidth, reg_rule, NULL); } @@ -1133,7 +1134,8 @@ static bool reg_is_world_roaming(struct wiphy *wiphy) if (is_world_regdom(cfg80211_regdomain->alpha2) || (wiphy->regd && is_world_regdom(wiphy->regd->alpha2))) return true; - if (last_request->initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE && + if (last_request && + last_request->initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE && wiphy->custom_regulatory) return true; return false; @@ -1142,6 +1144,12 @@ static bool reg_is_world_roaming(struct wiphy *wiphy) /* Reap the advantages of previously found beacons */ static void reg_process_beacons(struct wiphy *wiphy) { + /* + * Means we are just firing up cfg80211, so no beacons would + * have been processed yet. + */ + if (!last_request) + return; if (!reg_is_world_roaming(wiphy)) return; wiphy_update_beacon_reg(wiphy); @@ -1176,6 +1184,8 @@ static void handle_channel_custom(struct wiphy *wiphy, struct ieee80211_supported_band *sband; struct ieee80211_channel *chan; + assert_cfg80211_lock(); + sband = wiphy->bands[band]; BUG_ON(chan_idx >= sband->n_channels); chan = &sband->channels[chan_idx]; @@ -1214,10 +1224,13 @@ void wiphy_apply_custom_regulatory(struct wiphy *wiphy, const struct ieee80211_regdomain *regd) { enum ieee80211_band band; + + mutex_lock(&cfg80211_mutex); for (band = 0; band < IEEE80211_NUM_BANDS; band++) { if (wiphy->bands[band]) handle_band_custom(wiphy, band, regd); } + mutex_unlock(&cfg80211_mutex); } EXPORT_SYMBOL(wiphy_apply_custom_regulatory); @@ -1423,7 +1436,7 @@ new_request: return call_crda(last_request->alpha2); } -/* This currently only processes user and driver regulatory hints */ +/* This processes *all* regulatory hints */ static void reg_process_hint(struct regulatory_request *reg_request) { int r = 0; @@ -1538,6 +1551,13 @@ static int regulatory_hint_core(const char *alpha2) queue_regulatory_request(request); + /* + * This ensures last_request is populated once modules + * come swinging in and calling regulatory hints and + * wiphy_apply_custom_regulatory(). 
+ */ + flush_scheduled_work(); + return 0; } diff --git a/net/wireless/scan.c b/net/wireless/scan.c index 2ae65b39b52..1f260c40b6c 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@ -395,6 +395,7 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev, memcpy(ies, res->pub.information_elements, ielen); found->ies_allocated = true; found->pub.information_elements = ies; + found->pub.len_information_elements = ielen; } } } diff --git a/net/wireless/wext.c b/net/wireless/wext.c index cb6a5bb85d8..0e59f9ae9b8 100644 --- a/net/wireless/wext.c +++ b/net/wireless/wext.c @@ -786,6 +786,13 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd, err = -EFAULT; goto out; } + + if (cmd == SIOCSIWENCODEEXT) { + struct iw_encode_ext *ee = (void *) extra; + + if (iwp->length < sizeof(*ee) + ee->key_len) + return -EFAULT; + } } err = handler(dev, info, (union iwreq_data *) iwp, extra); diff --git a/scripts/setlocalversion b/scripts/setlocalversion index 32c8554f394..00790472f64 100755 --- a/scripts/setlocalversion +++ b/scripts/setlocalversion @@ -1,5 +1,13 @@ #!/bin/sh -# Print additional version information for non-release trees. +# +# This script adds local version information from the version +# control systems git, mercurial (hg) and subversion (svn). +# +# If something goes wrong, send a mail to the kernel build mailing list +# (see MAINTAINERS) and CC Nico Schottelius +# . +# +# usage() { echo "Usage: $0 [srctree]" >&2 @@ -10,12 +18,20 @@ cd "${1:-.}" || usage # Check for git and a git repo. if head=`git rev-parse --verify --short HEAD 2>/dev/null`; then - # Do we have an untagged tag? - if atag=`git describe 2>/dev/null`; then - echo "$atag" | awk -F- '{printf("-%05d-%s", $(NF-1),$(NF))}' - # add -g${head}, if there is no usable tag - else - printf '%s%s' -g $head + + # If we are at a tagged commit (like "v2.6.30-rc6"), we ignore it, + # because this version is defined in the top level Makefile. + if [ -z "`git describe --exact-match 2>/dev/null`" ]; then + + # If we are past a tagged commit (like "v2.6.30-rc5-302-g72357d5"), + # we pretty print it. + if atag="`git describe 2>/dev/null`"; then + echo "$atag" | awk -F- '{printf("-%05d-%s", $(NF-1),$(NF))}' + + # If we don't have a tag at all we print -g{commitish}. + else + printf '%s%s' -g $head + fi fi # Is this git on svn? diff --git a/security/tomoyo/common.c b/security/tomoyo/common.c index d4d41b3efc7..ddfb9cccf46 100644 --- a/security/tomoyo/common.c +++ b/security/tomoyo/common.c @@ -1720,14 +1720,14 @@ static bool tomoyo_policy_loader_exists(void) * policies are not loaded yet. * Thus, let do_execve() call this function everytime. 
*/ - struct nameidata nd; + struct path path; - if (path_lookup(tomoyo_loader, LOOKUP_FOLLOW, &nd)) { + if (kern_path(tomoyo_loader, LOOKUP_FOLLOW, &path)) { printk(KERN_INFO "Not activating Mandatory Access Control now " "since %s doesn't exist.\n", tomoyo_loader); return false; } - path_put(&nd.path); + path_put(&path); return true; } diff --git a/security/tomoyo/realpath.c b/security/tomoyo/realpath.c index bf8e2b45168..40927a84cb6 100644 --- a/security/tomoyo/realpath.c +++ b/security/tomoyo/realpath.c @@ -165,11 +165,11 @@ char *tomoyo_realpath_from_path(struct path *path) */ char *tomoyo_realpath(const char *pathname) { - struct nameidata nd; + struct path path; - if (pathname && path_lookup(pathname, LOOKUP_FOLLOW, &nd) == 0) { - char *buf = tomoyo_realpath_from_path(&nd.path); - path_put(&nd.path); + if (pathname && kern_path(pathname, LOOKUP_FOLLOW, &path) == 0) { + char *buf = tomoyo_realpath_from_path(&path); + path_put(&path); return buf; } return NULL; @@ -184,11 +184,11 @@ char *tomoyo_realpath(const char *pathname) */ char *tomoyo_realpath_nofollow(const char *pathname) { - struct nameidata nd; + struct path path; - if (pathname && path_lookup(pathname, 0, &nd) == 0) { - char *buf = tomoyo_realpath_from_path(&nd.path); - path_put(&nd.path); + if (pathname && kern_path(pathname, 0, &path) == 0) { + char *buf = tomoyo_realpath_from_path(&path); + path_put(&path); return buf; } return NULL; diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/tomoyo.c index 5b481912752..e42be5c4f05 100644 --- a/security/tomoyo/tomoyo.c +++ b/security/tomoyo/tomoyo.c @@ -27,6 +27,12 @@ static int tomoyo_cred_prepare(struct cred *new, const struct cred *old, static int tomoyo_bprm_set_creds(struct linux_binprm *bprm) { + int rc; + + rc = cap_bprm_set_creds(bprm); + if (rc) + return rc; + /* * Do only if this function is called for the first time of an execve * operation. diff --git a/sound/arm/aaci.c b/sound/arm/aaci.c index 7fbd68fab94..5c48e36038f 100644 --- a/sound/arm/aaci.c +++ b/sound/arm/aaci.c @@ -1074,7 +1074,7 @@ static unsigned int __devinit aaci_size_fifo(struct aaci *aaci) return i; } -static int __devinit aaci_probe(struct amba_device *dev, void *id) +static int __devinit aaci_probe(struct amba_device *dev, struct amba_id *id) { struct aaci *aaci; int ret, i; diff --git a/sound/arm/pxa2xx-ac97-lib.c b/sound/arm/pxa2xx-ac97-lib.c index a2c12d105c9..6fdca97186e 100644 --- a/sound/arm/pxa2xx-ac97-lib.c +++ b/sound/arm/pxa2xx-ac97-lib.c @@ -65,7 +65,7 @@ static void set_resetgpio_mode(int resetgpio_action) switch (resetgpio_action) { case RESETGPIO_NORMAL_ALTFUNC: if (reset_gpio == 113) - mode = 113 | GPIO_OUT | GPIO_DFLT_LOW; + mode = 113 | GPIO_ALT_FN_2_OUT; if (reset_gpio == 95) mode = 95 | GPIO_ALT_FN_1_OUT; break; diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c index a2a792c18c4..d659995ac3a 100644 --- a/sound/core/pcm_lib.c +++ b/sound/core/pcm_lib.c @@ -249,6 +249,11 @@ static int snd_pcm_update_hw_ptr_interrupt(struct snd_pcm_substream *substream) new_hw_ptr = hw_base + pos; } } + + /* Do jiffies check only in xrun_debug mode */ + if (!xrun_debug(substream)) + goto no_jiffies_check; + /* Skip the jiffies check for hardwares with BATCH flag. * Such hardware usually just increases the position at each IRQ, * thus it can't give any strange position. 
@@ -336,7 +341,9 @@ int snd_pcm_update_hw_ptr(struct snd_pcm_substream *substream) hw_base = 0; new_hw_ptr = hw_base + pos; } - if (((delta * HZ) / runtime->rate) > jdelta + HZ/100) { + /* Do jiffies check only in xrun_debug mode */ + if (xrun_debug(substream) && + ((delta * HZ) / runtime->rate) > jdelta + HZ/100) { hw_ptr_error(substream, "hw_ptr skipping! " "(pos=%ld, delta=%ld, period=%ld, jdelta=%lu/%lu)\n", @@ -1478,7 +1485,6 @@ static int snd_pcm_lib_ioctl_reset(struct snd_pcm_substream *substream, runtime->status->hw_ptr %= runtime->buffer_size; else runtime->status->hw_ptr = 0; - runtime->hw_ptr_jiffies = jiffies; snd_pcm_stream_unlock_irqrestore(substream, flags); return 0; } diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c index fc6f98e257d..b5da656d1ec 100644 --- a/sound/core/pcm_native.c +++ b/sound/core/pcm_native.c @@ -848,6 +848,7 @@ static void snd_pcm_post_start(struct snd_pcm_substream *substream, int state) { struct snd_pcm_runtime *runtime = substream->runtime; snd_pcm_trigger_tstamp(substream); + runtime->hw_ptr_jiffies = jiffies; runtime->status->state = state; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK && runtime->silence_size > 0) @@ -961,6 +962,11 @@ static int snd_pcm_do_pause(struct snd_pcm_substream *substream, int push) { if (substream->runtime->trigger_master != substream) return 0; + /* The jiffies check in snd_pcm_update_hw_ptr*() is done by + * a delta between the current jiffies, this gives a large enough + * delta, effectively to skip the check once. + */ + substream->runtime->hw_ptr_jiffies = jiffies - HZ * 1000; return substream->ops->trigger(substream, push ? SNDRV_PCM_TRIGGER_PAUSE_PUSH : SNDRV_PCM_TRIGGER_PAUSE_RELEASE); diff --git a/sound/drivers/pcsp/pcsp_mixer.c b/sound/drivers/pcsp/pcsp_mixer.c index caeb0f57fcc..199b0337714 100644 --- a/sound/drivers/pcsp/pcsp_mixer.c +++ b/sound/drivers/pcsp/pcsp_mixer.c @@ -50,8 +50,8 @@ static int pcsp_treble_info(struct snd_kcontrol *kcontrol, uinfo->value.enumerated.items = chip->max_treble + 1; if (uinfo->value.enumerated.item > chip->max_treble) uinfo->value.enumerated.item = chip->max_treble; - sprintf(uinfo->value.enumerated.name, "%d", - PCSP_CALC_RATE(uinfo->value.enumerated.item)); + sprintf(uinfo->value.enumerated.name, "%lu", + (unsigned long)PCSP_CALC_RATE(uinfo->value.enumerated.item)); return 0; } diff --git a/sound/drivers/serial-u16550.c b/sound/drivers/serial-u16550.c index b2b6d50c942..a25fb7b1f44 100644 --- a/sound/drivers/serial-u16550.c +++ b/sound/drivers/serial-u16550.c @@ -963,16 +963,11 @@ static int __devinit snd_serial_probe(struct platform_device *devptr) if (err < 0) goto _err; - sprintf(card->longname, "%s at 0x%lx, irq %d speed %d div %d outs %d ins %d adaptor %s droponfull %d", + sprintf(card->longname, "%s [%s] at %#lx, irq %d", card->shortname, - uart->base, - uart->irq, - uart->speed, - (int)uart->divisor, - outs[dev], - ins[dev], adaptor_names[uart->adaptor], - uart->drop_on_full); + uart->base, + uart->irq); snd_card_set_dev(card, &devptr->dev); diff --git a/sound/pci/ac97/ac97_patch.c b/sound/pci/ac97/ac97_patch.c index 81bc93e5f1e..7337abdbe4e 100644 --- a/sound/pci/ac97/ac97_patch.c +++ b/sound/pci/ac97/ac97_patch.c @@ -958,10 +958,13 @@ static int patch_sigmatel_stac9708_3d(struct snd_ac97 * ac97) } static const struct snd_kcontrol_new snd_ac97_sigmatel_4speaker = -AC97_SINGLE("Sigmatel 4-Speaker Stereo Playback Switch", AC97_SIGMATEL_DAC2INVERT, 2, 1, 0); +AC97_SINGLE("Sigmatel 4-Speaker Stereo Playback Switch", + AC97_SIGMATEL_DAC2INVERT, 2, 1, 
0); +/* "Sigmatel " removed due to excessive name length: */ static const struct snd_kcontrol_new snd_ac97_sigmatel_phaseinvert = -AC97_SINGLE("Sigmatel Surround Phase Inversion Playback Switch", AC97_SIGMATEL_DAC2INVERT, 3, 1, 0); +AC97_SINGLE("Surround Phase Inversion Playback Switch", + AC97_SIGMATEL_DAC2INVERT, 3, 1, 0); static const struct snd_kcontrol_new snd_ac97_sigmatel_controls[] = { AC97_SINGLE("Sigmatel DAC 6dB Attenuate", AC97_SIGMATEL_ANALOG, 1, 1, 0), diff --git a/sound/pci/ca0106/ca0106_mixer.c b/sound/pci/ca0106/ca0106_mixer.c index ad2888705d2..c111efe61c3 100644 --- a/sound/pci/ca0106/ca0106_mixer.c +++ b/sound/pci/ca0106/ca0106_mixer.c @@ -800,7 +800,7 @@ int __devinit snd_ca0106_mixer(struct snd_ca0106 *emu) "Capture Volume", "External Amplifier", "Sigmatel 4-Speaker Stereo Playback Switch", - "Sigmatel Surround Phase Inversion Playback ", + "Surround Phase Inversion Playback Switch", NULL }; static char *ca0106_rename_ctls[] = { diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 21e99cfa8c4..3128e1a6bc6 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -2141,6 +2141,7 @@ static struct snd_pci_quirk probe_mask_list[] __devinitdata = { /* including bogus ALC268 in slot#2 that conflicts with ALC888 */ SND_PCI_QUIRK(0x17c0, 0x4085, "Medion MD96630", 0x01), /* forced codec slots */ + SND_PCI_QUIRK(0x1043, 0x1262, "ASUS W5Fm", 0x103), SND_PCI_QUIRK(0x1046, 0x1262, "ASUS W5F", 0x103), {} }; diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index 56ce19e68cb..4fcbe21829a 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c @@ -1848,6 +1848,7 @@ static const char *cxt5051_models[CXT5051_MODELS] = { static struct snd_pci_quirk cxt5051_cfg_tbl[] = { SND_PCI_QUIRK(0x103c, 0x30cf, "HP DV6736", CXT5051_HP_DV6736), + SND_PCI_QUIRK(0x103c, 0x360b, "Compaq Presario CQ60", CXT5051_HP), SND_PCI_QUIRK(0x14f1, 0x0101, "Conexant Reference board", CXT5051_LAPTOP), SND_PCI_QUIRK(0x14f1, 0x5051, "HP Spartan 1.1", CXT5051_HP), diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index b8a0d3e7927..0fd258eba3a 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -776,6 +776,12 @@ static void alc_set_input_pin(struct hda_codec *codec, hda_nid_t nid, pincap = (pincap & AC_PINCAP_VREF) >> AC_PINCAP_VREF_SHIFT; if (pincap & AC_PINCAP_VREF_80) val = PIN_VREF80; + else if (pincap & AC_PINCAP_VREF_50) + val = PIN_VREF50; + else if (pincap & AC_PINCAP_VREF_100) + val = PIN_VREF100; + else if (pincap & AC_PINCAP_VREF_GRD) + val = PIN_VREFGRD; } snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_PIN_WIDGET_CONTROL, val); } @@ -12058,6 +12064,7 @@ static struct snd_pci_quirk alc268_cfg_tbl[] = { SND_PCI_QUIRK(0x1028, 0x0253, "Dell OEM", ALC268_DELL), SND_PCI_QUIRK(0x1028, 0x02b0, "Dell Inspiron Mini9", ALC268_DELL), SND_PCI_QUIRK(0x103c, 0x30cc, "TOSHIBA", ALC268_TOSHIBA), + SND_PCI_QUIRK(0x103c, 0x30f1, "HP TX25xx series", ALC268_TOSHIBA), SND_PCI_QUIRK(0x1043, 0x1205, "ASUS W7J", ALC268_3ST), SND_PCI_QUIRK(0x1179, 0xff10, "TOSHIBA A205", ALC268_TOSHIBA), SND_PCI_QUIRK(0x1179, 0xff50, "TOSHIBA A305", ALC268_TOSHIBA), diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c index 917bc5d3ac2..d2fd8ef6aef 100644 --- a/sound/pci/hda/patch_sigmatel.c +++ b/sound/pci/hda/patch_sigmatel.c @@ -150,6 +150,7 @@ enum { STAC_D965_REF, STAC_D965_3ST, STAC_D965_5ST, + STAC_D965_5ST_NO_FP, STAC_DELL_3ST, STAC_DELL_BIOS, STAC_927X_MODELS @@ -2154,6 
+2155,13 @@ static unsigned int d965_5st_pin_configs[14] = { 0x40000100, 0x40000100 }; +static unsigned int d965_5st_no_fp_pin_configs[14] = { + 0x40000100, 0x40000100, 0x0181304e, 0x01014010, + 0x01a19040, 0x01011012, 0x01016011, 0x40000100, + 0x40000100, 0x40000100, 0x40000100, 0x01442070, + 0x40000100, 0x40000100 +}; + static unsigned int dell_3st_pin_configs[14] = { 0x02211230, 0x02a11220, 0x01a19040, 0x01114210, 0x01111212, 0x01116211, 0x01813050, 0x01112214, @@ -2166,6 +2174,7 @@ static unsigned int *stac927x_brd_tbl[STAC_927X_MODELS] = { [STAC_D965_REF] = ref927x_pin_configs, [STAC_D965_3ST] = d965_3st_pin_configs, [STAC_D965_5ST] = d965_5st_pin_configs, + [STAC_D965_5ST_NO_FP] = d965_5st_no_fp_pin_configs, [STAC_DELL_3ST] = dell_3st_pin_configs, [STAC_DELL_BIOS] = NULL, }; @@ -2176,6 +2185,7 @@ static const char *stac927x_models[STAC_927X_MODELS] = { [STAC_D965_REF] = "ref", [STAC_D965_3ST] = "3stack", [STAC_D965_5ST] = "5stack", + [STAC_D965_5ST_NO_FP] = "5stack-no-fp", [STAC_DELL_3ST] = "dell-3stack", [STAC_DELL_BIOS] = "dell-bios", }; @@ -4079,7 +4089,12 @@ static int stac92xx_init(struct hda_codec *codec) pinctl = snd_hda_codec_read(codec, nid, 0, AC_VERB_GET_PIN_WIDGET_CONTROL, 0); /* if PINCTL already set then skip */ - if (!(pinctl & AC_PINCTL_IN_EN)) { + /* Also, if both INPUT and OUTPUT are set, + * it must be a BIOS bug; need to override, too + */ + if (!(pinctl & AC_PINCTL_IN_EN) || + (pinctl & AC_PINCTL_OUT_EN)) { + pinctl &= ~AC_PINCTL_OUT_EN; pinctl |= AC_PINCTL_IN_EN; stac92xx_auto_set_pinctl(codec, nid, pinctl); diff --git a/sound/pci/riptide/riptide.c b/sound/pci/riptide/riptide.c index 6f1034417a0..e51a5ef1954 100644 --- a/sound/pci/riptide/riptide.c +++ b/sound/pci/riptide/riptide.c @@ -889,7 +889,7 @@ static int sendcmd(struct cmdif *cif, u32 flags, u32 cmd, u32 parm, spin_lock_irqsave(&cif->lock, irqflags); while (i++ < CMDIF_TIMEOUT && !IS_READY(cif->hwport)) udelay(10); - if (i >= CMDIF_TIMEOUT) { + if (i > CMDIF_TIMEOUT) { err = -EBUSY; goto errout; } @@ -907,8 +907,10 @@ static int sendcmd(struct cmdif *cif, u32 flags, u32 cmd, u32 parm, WRITE_PORT_ULONG(cmdport->data1, cmd); /* write cmd */ if ((flags & RESP) && ret) { while (!IS_DATF(cmdport) && - time++ < CMDIF_TIMEOUT) + time < CMDIF_TIMEOUT) { udelay(10); + time++; + } if (time < CMDIF_TIMEOUT) { /* read response */ ret->retlongs[0] = READ_PORT_ULONG(cmdport->data1); @@ -1454,7 +1456,7 @@ static int snd_riptide_trigger(struct snd_pcm_substream *substream, int cmd) SEND_GPOS(cif, 0, data->id, &rptr); udelay(1); } while (i != rptr.retlongs[1] && j++ < MAX_WRITE_RETRY); - if (j >= MAX_WRITE_RETRY) + if (j > MAX_WRITE_RETRY) snd_printk(KERN_ERR "Riptide: Could not stop stream!"); break; case SNDRV_PCM_TRIGGER_PAUSE_PUSH: @@ -1783,7 +1785,7 @@ snd_riptide_codec_write(struct snd_ac97 *ac97, unsigned short reg, SEND_SACR(cif, val, reg); SEND_RACR(cif, reg, &rptr); } while (rptr.retwords[1] != val && i++ < MAX_WRITE_RETRY); - if (i == MAX_WRITE_RETRY) + if (i > MAX_WRITE_RETRY) snd_printdd("Write AC97 reg failed\n"); } diff --git a/sound/pci/via82xx.c b/sound/pci/via82xx.c index 809b233dd4a..1ef58c51c21 100644 --- a/sound/pci/via82xx.c +++ b/sound/pci/via82xx.c @@ -1687,7 +1687,7 @@ static int snd_via8233_pcmdxs_volume_put(struct snd_kcontrol *kcontrol, return change; } -static const DECLARE_TLV_DB_SCALE(db_scale_dxs, -9450, 150, 1); +static const DECLARE_TLV_DB_SCALE(db_scale_dxs, -4650, 150, 1); static struct snd_kcontrol_new snd_via8233_pcmdxs_volume_control __devinitdata = { .name = "PCM Playback Volume", 
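The riptide.c hunks above adjust timeout tests that follow post-incrementing poll loops: after "while (i++ < LIMIT && !ready())", a success on the very last pass still leaves the counter equal to LIMIT, so only a strictly-greater comparison proves that every attempt was really used up. A minimal sketch of that idiom, with hypothetical names rather than the driver's code:

/* Hedged illustration of the post-increment timeout idiom fixed in
 * riptide.c above; wait_ready() and ready() are hypothetical names.
 */
#include <stdbool.h>

static int wait_ready(bool (*ready)(void), unsigned int limit)
{
	unsigned int i = 0;

	while (i++ < limit && !ready())
		;	/* poll; the real driver udelay()s here */

	/* i == limit can mean "became ready on the final try";
	 * only i > limit means the loop ran out of attempts.
	 */
	return (i > limit) ? -1 : 0;
}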
diff --git a/sound/soc/codecs/wm8990.c b/sound/soc/codecs/wm8990.c index c518c3e5aa3..40cd274eb1e 100644 --- a/sound/soc/codecs/wm8990.c +++ b/sound/soc/codecs/wm8990.c @@ -729,7 +729,7 @@ SND_SOC_DAPM_MIXER_E("INMIXL", WM8990_INTDRIVBITS, WM8990_INMIXL_PWR_BIT, 0, inmixer_event, SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD), /* AINLMUX */ -SND_SOC_DAPM_MUX_E("AILNMUX", WM8990_INTDRIVBITS, WM8990_AINLMUX_PWR_BIT, 0, +SND_SOC_DAPM_MUX_E("AINLMUX", WM8990_INTDRIVBITS, WM8990_AINLMUX_PWR_BIT, 0, &wm8990_dapm_ainlmux_controls, inmixer_event, SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD), @@ -740,7 +740,7 @@ SND_SOC_DAPM_MIXER_E("INMIXR", WM8990_INTDRIVBITS, WM8990_INMIXR_PWR_BIT, 0, inmixer_event, SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD), /* AINRMUX */ -SND_SOC_DAPM_MUX_E("AIRNMUX", WM8990_INTDRIVBITS, WM8990_AINRMUX_PWR_BIT, 0, +SND_SOC_DAPM_MUX_E("AINRMUX", WM8990_INTDRIVBITS, WM8990_AINRMUX_PWR_BIT, 0, &wm8990_dapm_ainrmux_controls, inmixer_event, SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD), @@ -848,40 +848,40 @@ static const struct snd_soc_dapm_route audio_map[] = { {"LIN12 PGA", "LIN2 Switch", "LIN2"}, /* LIN34 PGA */ {"LIN34 PGA", "LIN3 Switch", "LIN3"}, - {"LIN34 PGA", "LIN4 Switch", "LIN4"}, + {"LIN34 PGA", "LIN4 Switch", "LIN4/RXN"}, /* INMIXL */ {"INMIXL", "Record Left Volume", "LOMIX"}, {"INMIXL", "LIN2 Volume", "LIN2"}, {"INMIXL", "LINPGA12 Switch", "LIN12 PGA"}, {"INMIXL", "LINPGA34 Switch", "LIN34 PGA"}, - /* AILNMUX */ - {"AILNMUX", "INMIXL Mix", "INMIXL"}, - {"AILNMUX", "DIFFINL Mix", "LIN12PGA"}, - {"AILNMUX", "DIFFINL Mix", "LIN34PGA"}, - {"AILNMUX", "RXVOICE Mix", "LIN4/RXN"}, - {"AILNMUX", "RXVOICE Mix", "RIN4/RXP"}, + /* AINLMUX */ + {"AINLMUX", "INMIXL Mix", "INMIXL"}, + {"AINLMUX", "DIFFINL Mix", "LIN12 PGA"}, + {"AINLMUX", "DIFFINL Mix", "LIN34 PGA"}, + {"AINLMUX", "RXVOICE Mix", "LIN4/RXN"}, + {"AINLMUX", "RXVOICE Mix", "RIN4/RXP"}, /* ADC */ - {"Left ADC", NULL, "AILNMUX"}, + {"Left ADC", NULL, "AINLMUX"}, /* RIN12 PGA */ {"RIN12 PGA", "RIN1 Switch", "RIN1"}, {"RIN12 PGA", "RIN2 Switch", "RIN2"}, /* RIN34 PGA */ {"RIN34 PGA", "RIN3 Switch", "RIN3"}, - {"RIN34 PGA", "RIN4 Switch", "RIN4"}, + {"RIN34 PGA", "RIN4 Switch", "RIN4/RXP"}, /* INMIXL */ {"INMIXR", "Record Right Volume", "ROMIX"}, {"INMIXR", "RIN2 Volume", "RIN2"}, {"INMIXR", "RINPGA12 Switch", "RIN12 PGA"}, {"INMIXR", "RINPGA34 Switch", "RIN34 PGA"}, - /* AIRNMUX */ - {"AIRNMUX", "INMIXR Mix", "INMIXR"}, - {"AIRNMUX", "DIFFINR Mix", "RIN12PGA"}, - {"AIRNMUX", "DIFFINR Mix", "RIN34PGA"}, - {"AIRNMUX", "RXVOICE Mix", "RIN4/RXN"}, - {"AIRNMUX", "RXVOICE Mix", "RIN4/RXP"}, + /* AINRMUX */ + {"AINRMUX", "INMIXR Mix", "INMIXR"}, + {"AINRMUX", "DIFFINR Mix", "RIN12 PGA"}, + {"AINRMUX", "DIFFINR Mix", "RIN34 PGA"}, + {"AINRMUX", "RXVOICE Mix", "LIN4/RXN"}, + {"AINRMUX", "RXVOICE Mix", "RIN4/RXP"}, /* ADC */ - {"Right ADC", NULL, "AIRNMUX"}, + {"Right ADC", NULL, "AINRMUX"}, /* LOMIX */ {"LOMIX", "LOMIX RIN3 Bypass Switch", "RIN3"}, @@ -922,7 +922,7 @@ static const struct snd_soc_dapm_route audio_map[] = { {"LOPMIX", "LOPMIX Left Mixer PGA Switch", "LOPGA"}, /* OUT3MIX */ - {"OUT3MIX", "OUT3MIX LIN4/RXP Bypass Switch", "LIN4/RXP"}, + {"OUT3MIX", "OUT3MIX LIN4/RXP Bypass Switch", "LIN4/RXN"}, {"OUT3MIX", "OUT3MIX Left Out PGA Switch", "LOPGA"}, /* OUT4MIX */ @@ -949,7 +949,7 @@ static const struct snd_soc_dapm_route audio_map[] = { /* Output Pins */ {"LON", NULL, "LONMIX"}, {"LOP", NULL, "LOPMIX"}, - {"OUT", NULL, "OUT3MIX"}, + {"OUT3", NULL, "OUT3MIX"}, {"LOUT", NULL, "LOUT PGA"}, {"SPKN", NULL, "SPKMIX"}, 
{"ROUT", NULL, "ROUT PGA"}, diff --git a/sound/soc/davinci/Kconfig b/sound/soc/davinci/Kconfig index bd7392c9657..411a710be66 100644 --- a/sound/soc/davinci/Kconfig +++ b/sound/soc/davinci/Kconfig @@ -10,13 +10,14 @@ config SND_DAVINCI_SOC_I2S tristate config SND_DAVINCI_SOC_EVM - tristate "SoC Audio support for DaVinci EVM" - depends on SND_DAVINCI_SOC && MACH_DAVINCI_EVM + tristate "SoC Audio support for DaVinci DM6446 or DM355 EVM" + depends on SND_DAVINCI_SOC + depends on MACH_DAVINCI_EVM || MACH_DAVINCI_DM355_EVM select SND_DAVINCI_SOC_I2S select SND_SOC_TLV320AIC3X help Say Y if you want to add support for SoC audio on TI - DaVinci EVM platform. + DaVinci DM6446 or DM355 EVM platforms. config SND_DAVINCI_SOC_SFFSDR tristate "SoC Audio support for SFFSDR" diff --git a/sound/soc/davinci/davinci-evm.c b/sound/soc/davinci/davinci-evm.c index 9b90b347007..58fd1cbedd8 100644 --- a/sound/soc/davinci/davinci-evm.c +++ b/sound/soc/davinci/davinci-evm.c @@ -20,7 +20,11 @@ #include #include -#include +#include + +#include +#include +#include #include "../codecs/tlv320aic3x.h" #include "davinci-pcm.h" @@ -150,7 +154,7 @@ static struct snd_soc_card snd_soc_card_evm = { /* evm audio private data */ static struct aic3x_setup_data evm_aic3x_setup = { - .i2c_bus = 0, + .i2c_bus = 1, .i2c_address = 0x1b, }; @@ -161,36 +165,73 @@ static struct snd_soc_device evm_snd_devdata = { .codec_data = &evm_aic3x_setup, }; +/* DM6446 EVM uses ASP0; line-out is a pair of RCA jacks */ static struct resource evm_snd_resources[] = { { - .start = DAVINCI_MCBSP_BASE, - .end = DAVINCI_MCBSP_BASE + SZ_8K - 1, + .start = DAVINCI_ASP0_BASE, + .end = DAVINCI_ASP0_BASE + SZ_8K - 1, .flags = IORESOURCE_MEM, }, }; static struct evm_snd_platform_data evm_snd_data = { - .tx_dma_ch = DM644X_DMACH_MCBSP_TX, - .rx_dma_ch = DM644X_DMACH_MCBSP_RX, + .tx_dma_ch = DAVINCI_DMA_ASP0_TX, + .rx_dma_ch = DAVINCI_DMA_ASP0_RX, +}; + +/* DM335 EVM uses ASP1; line-out is a stereo mini-jack */ +static struct resource dm335evm_snd_resources[] = { + { + .start = DAVINCI_ASP1_BASE, + .end = DAVINCI_ASP1_BASE + SZ_8K - 1, + .flags = IORESOURCE_MEM, + }, +}; + +static struct evm_snd_platform_data dm335evm_snd_data = { + .tx_dma_ch = DAVINCI_DMA_ASP1_TX, + .rx_dma_ch = DAVINCI_DMA_ASP1_RX, }; static struct platform_device *evm_snd_device; static int __init evm_init(void) { + struct resource *resources; + unsigned num_resources; + struct evm_snd_platform_data *data; + int index; int ret; - evm_snd_device = platform_device_alloc("soc-audio", 0); + if (machine_is_davinci_evm()) { + davinci_cfg_reg(DM644X_MCBSP); + + resources = evm_snd_resources; + num_resources = ARRAY_SIZE(evm_snd_resources); + data = &evm_snd_data; + index = 0; + } else if (machine_is_davinci_dm355_evm()) { + /* we don't use ASP1 IRQs, or we'd need to mux them ... 
*/ + davinci_cfg_reg(DM355_EVT8_ASP1_TX); + davinci_cfg_reg(DM355_EVT9_ASP1_RX); + + resources = dm335evm_snd_resources; + num_resources = ARRAY_SIZE(dm335evm_snd_resources); + data = &dm335evm_snd_data; + index = 1; + } else + return -EINVAL; + + evm_snd_device = platform_device_alloc("soc-audio", index); if (!evm_snd_device) return -ENOMEM; platform_set_drvdata(evm_snd_device, &evm_snd_devdata); evm_snd_devdata.dev = &evm_snd_device->dev; - platform_device_add_data(evm_snd_device, &evm_snd_data, - sizeof(evm_snd_data)); + platform_device_add_data(evm_snd_device, data, sizeof(*data)); - ret = platform_device_add_resources(evm_snd_device, evm_snd_resources, - ARRAY_SIZE(evm_snd_resources)); + ret = platform_device_add_resources(evm_snd_device, resources, + num_resources); if (ret) { platform_device_put(evm_snd_device); return ret; diff --git a/sound/soc/davinci/davinci-i2s.c b/sound/soc/davinci/davinci-i2s.c index ffdb9439d3d..b1ea52fc83c 100644 --- a/sound/soc/davinci/davinci-i2s.c +++ b/sound/soc/davinci/davinci-i2s.c @@ -24,6 +24,26 @@ #include "davinci-pcm.h" + +/* + * NOTE: terminology here is confusing. + * + * - This driver supports the "Audio Serial Port" (ASP), + * found on dm6446, dm355, and other DaVinci chips. + * + * - But it labels it a "Multi-channel Buffered Serial Port" + * (McBSP) as on older chips like the dm642 ... which was + * backward-compatible, possibly explaining that confusion. + * + * - OMAP chips have a controller called McBSP, which is + * incompatible with the DaVinci flavor of McBSP. + * + * - Newer DaVinci chips have a controller called McASP, + * incompatible with ASP and with either McBSP. + * + * In short: this uses ASP to implement I2S, not McBSP. + * And it won't be the only DaVinci implementation of I2S. + */ #define DAVINCI_MCBSP_DRR_REG 0x00 #define DAVINCI_MCBSP_DXR_REG 0x04 #define DAVINCI_MCBSP_SPCR_REG 0x08 @@ -421,7 +441,7 @@ static int davinci_i2s_probe(struct platform_device *pdev, { struct snd_soc_device *socdev = platform_get_drvdata(pdev); struct snd_soc_card *card = socdev->card; - struct snd_soc_dai *cpu_dai = card->dai_link[pdev->id].cpu_dai; + struct snd_soc_dai *cpu_dai = card->dai_link->cpu_dai; struct davinci_mcbsp_dev *dev; struct resource *mem, *ioarea; struct evm_snd_platform_data *pdata; @@ -448,7 +468,7 @@ static int davinci_i2s_probe(struct platform_device *pdev, cpu_dai->private_data = dev; - dev->clk = clk_get(&pdev->dev, "McBSPCLK"); + dev->clk = clk_get(&pdev->dev, NULL); if (IS_ERR(dev->clk)) { ret = -ENODEV; goto err_free_mem; @@ -483,7 +503,7 @@ static void davinci_i2s_remove(struct platform_device *pdev, { struct snd_soc_device *socdev = platform_get_drvdata(pdev); struct snd_soc_card *card = socdev->card; - struct snd_soc_dai *cpu_dai = card->dai_link[pdev->id].cpu_dai; + struct snd_soc_dai *cpu_dai = card->dai_link->cpu_dai; struct davinci_mcbsp_dev *dev = cpu_dai->private_data; struct resource *mem; diff --git a/sound/soc/davinci/davinci-pcm.c b/sound/soc/davinci/davinci-pcm.c index 7af3b5b3a53..a0599658848 100644 --- a/sound/soc/davinci/davinci-pcm.c +++ b/sound/soc/davinci/davinci-pcm.c @@ -22,6 +22,7 @@ #include #include +#include #include "davinci-pcm.h" @@ -51,7 +52,7 @@ struct davinci_runtime_data { spinlock_t lock; int period; /* current DMA period */ int master_lch; /* Master DMA channel */ - int slave_lch; /* Slave DMA channel */ + int slave_lch; /* linked parameter RAM reload slot */ struct davinci_pcm_dma_params *params; /* DMA params */ }; @@ -90,18 +91,18 @@ static void davinci_pcm_enqueue_dma(struct
snd_pcm_substream *substream) dst_bidx = data_type; } - davinci_set_dma_src_params(lch, src, INCR, W8BIT); - davinci_set_dma_dest_params(lch, dst, INCR, W8BIT); - davinci_set_dma_src_index(lch, src_bidx, 0); - davinci_set_dma_dest_index(lch, dst_bidx, 0); - davinci_set_dma_transfer_params(lch, data_type, count, 1, 0, ASYNC); + edma_set_src(lch, src, INCR, W8BIT); + edma_set_dest(lch, dst, INCR, W8BIT); + edma_set_src_index(lch, src_bidx, 0); + edma_set_dest_index(lch, dst_bidx, 0); + edma_set_transfer_params(lch, data_type, count, 1, 0, ASYNC); prtd->period++; if (unlikely(prtd->period >= runtime->periods)) prtd->period = 0; } -static void davinci_pcm_dma_irq(int lch, u16 ch_status, void *data) +static void davinci_pcm_dma_irq(unsigned lch, u16 ch_status, void *data) { struct snd_pcm_substream *substream = data; struct davinci_runtime_data *prtd = substream->runtime->private_data; @@ -125,7 +126,7 @@ static int davinci_pcm_dma_request(struct snd_pcm_substream *substream) struct davinci_runtime_data *prtd = substream->runtime->private_data; struct snd_soc_pcm_runtime *rtd = substream->private_data; struct davinci_pcm_dma_params *dma_data = rtd->dai->cpu_dai->dma_data; - int tcc = TCC_ANY; + struct edmacc_param p_ram; int ret; if (!dma_data) @@ -134,22 +135,34 @@ static int davinci_pcm_dma_request(struct snd_pcm_substream *substream) prtd->params = dma_data; /* Request master DMA channel */ - ret = davinci_request_dma(prtd->params->channel, prtd->params->name, + ret = edma_alloc_channel(prtd->params->channel, davinci_pcm_dma_irq, substream, - &prtd->master_lch, &tcc, EVENTQ_0); - if (ret) + EVENTQ_0); + if (ret < 0) return ret; + prtd->master_lch = ret; - /* Request slave DMA channel */ - ret = davinci_request_dma(PARAM_ANY, "Link", - NULL, NULL, &prtd->slave_lch, &tcc, EVENTQ_0); - if (ret) { - davinci_free_dma(prtd->master_lch); + /* Request parameter RAM reload slot */ + ret = edma_alloc_slot(EDMA_SLOT_ANY); + if (ret < 0) { + edma_free_channel(prtd->master_lch); return ret; } + prtd->slave_lch = ret; - /* Link slave DMA channel in loopback */ - davinci_dma_link_lch(prtd->slave_lch, prtd->slave_lch); + /* Issue transfer completion IRQ when the channel completes a + * transfer, then always reload from the same slot (by a kind + * of loopback link). The completion IRQ handler will update + * the reload slot with a new buffer. + * + * REVISIT save p_ram here after setting up everything except + * the buffer and its length (ccnt) ... use it as a template + * so davinci_pcm_enqueue_dma() takes less time in IRQ. 
+ */ + edma_read_slot(prtd->slave_lch, &p_ram); + p_ram.opt |= TCINTEN | EDMA_TCC(prtd->master_lch); + p_ram.link_bcntrld = prtd->slave_lch << 5; + edma_write_slot(prtd->slave_lch, &p_ram); return 0; } @@ -165,12 +178,12 @@ static int davinci_pcm_trigger(struct snd_pcm_substream *substream, int cmd) case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_RESUME: case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: - davinci_start_dma(prtd->master_lch); + edma_start(prtd->master_lch); break; case SNDRV_PCM_TRIGGER_STOP: case SNDRV_PCM_TRIGGER_SUSPEND: case SNDRV_PCM_TRIGGER_PAUSE_PUSH: - davinci_stop_dma(prtd->master_lch); + edma_stop(prtd->master_lch); break; default: ret = -EINVAL; @@ -185,14 +198,14 @@ static int davinci_pcm_trigger(struct snd_pcm_substream *substream, int cmd) static int davinci_pcm_prepare(struct snd_pcm_substream *substream) { struct davinci_runtime_data *prtd = substream->runtime->private_data; - struct paramentry_descriptor temp; + struct edmacc_param temp; prtd->period = 0; davinci_pcm_enqueue_dma(substream); - /* Get slave channel dma params for master channel startup */ - davinci_get_dma_params(prtd->slave_lch, &temp); - davinci_set_dma_params(prtd->master_lch, &temp); + /* Copy self-linked parameter RAM entry into master channel */ + edma_read_slot(prtd->slave_lch, &temp); + edma_write_slot(prtd->master_lch, &temp); return 0; } @@ -208,7 +221,7 @@ davinci_pcm_pointer(struct snd_pcm_substream *substream) spin_lock(&prtd->lock); - davinci_dma_getposition(prtd->master_lch, &src, &dst); + edma_get_position(prtd->master_lch, &src, &dst); if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) count = src - runtime->dma_addr; else @@ -253,10 +266,10 @@ static int davinci_pcm_close(struct snd_pcm_substream *substream) struct snd_pcm_runtime *runtime = substream->runtime; struct davinci_runtime_data *prtd = runtime->private_data; - davinci_dma_unlink_lch(prtd->slave_lch, prtd->slave_lch); + edma_unlink(prtd->slave_lch); - davinci_free_dma(prtd->slave_lch); - davinci_free_dma(prtd->master_lch); + edma_free_slot(prtd->slave_lch); + edma_free_channel(prtd->master_lch); kfree(prtd); diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c index 99712f652d0..1cd149b9ce6 100644 --- a/sound/soc/soc-core.c +++ b/sound/soc/soc-core.c @@ -954,6 +954,9 @@ static int soc_remove(struct platform_device *pdev) struct snd_soc_platform *platform = card->platform; struct snd_soc_codec_device *codec_dev = socdev->codec_dev; + if (!card->instantiated) + return 0; + run_delayed_work(&card->delayed_work); if (platform->remove) diff --git a/sound/usb/usbaudio.c b/sound/usb/usbaudio.c index 823296d7d57..a6b88482637 100644 --- a/sound/usb/usbaudio.c +++ b/sound/usb/usbaudio.c @@ -3347,7 +3347,7 @@ static int snd_usb_create_quirk(struct snd_usb_audio *chip, [QUIRK_MIDI_YAMAHA] = snd_usb_create_midi_interface, [QUIRK_MIDI_MIDIMAN] = snd_usb_create_midi_interface, [QUIRK_MIDI_NOVATION] = snd_usb_create_midi_interface, - [QUIRK_MIDI_RAW] = snd_usb_create_midi_interface, + [QUIRK_MIDI_FASTLANE] = snd_usb_create_midi_interface, [QUIRK_MIDI_EMAGIC] = snd_usb_create_midi_interface, [QUIRK_MIDI_CME] = snd_usb_create_midi_interface, [QUIRK_AUDIO_STANDARD_INTERFACE] = create_standard_audio_quirk, diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h index 36e4f7a29ad..8e7f78941ba 100644 --- a/sound/usb/usbaudio.h +++ b/sound/usb/usbaudio.h @@ -153,7 +153,7 @@ enum quirk_type { QUIRK_MIDI_YAMAHA, QUIRK_MIDI_MIDIMAN, QUIRK_MIDI_NOVATION, - QUIRK_MIDI_RAW, + QUIRK_MIDI_FASTLANE, QUIRK_MIDI_EMAGIC, QUIRK_MIDI_CME, 
QUIRK_MIDI_US122L, diff --git a/sound/usb/usbmidi.c b/sound/usb/usbmidi.c index 26bad373fe6..2fb35cc22a3 100644 --- a/sound/usb/usbmidi.c +++ b/sound/usb/usbmidi.c @@ -1778,8 +1778,18 @@ int snd_usb_create_midi_interface(struct snd_usb_audio* chip, umidi->usb_protocol_ops = &snd_usbmidi_novation_ops; err = snd_usbmidi_detect_per_port_endpoints(umidi, endpoints); break; - case QUIRK_MIDI_RAW: + case QUIRK_MIDI_FASTLANE: umidi->usb_protocol_ops = &snd_usbmidi_raw_ops; + /* + * Interface 1 contains isochronous endpoints, but with the same + * numbers as in interface 0. Since it is interface 1 that the + * USB core has most recently seen, these descriptors are now + * associated with the endpoint numbers. This will foul up our + * attempts to submit bulk/interrupt URBs to the endpoints in + * interface 0, so we have to make sure that the USB core looks + * again at interface 0 by calling usb_set_interface() on it. + */ + usb_set_interface(umidi->chip->dev, 0, 0); err = snd_usbmidi_detect_per_port_endpoints(umidi, endpoints); break; case QUIRK_MIDI_EMAGIC: diff --git a/sound/usb/usbquirks.h b/sound/usb/usbquirks.h index 647ef502965..5d955aaad85 100644 --- a/sound/usb/usbquirks.h +++ b/sound/usb/usbquirks.h @@ -1868,7 +1868,7 @@ YAMAHA_DEVICE(0x7010, "UB99"), .data = & (const struct snd_usb_audio_quirk[]) { { .ifnum = 0, - .type = QUIRK_MIDI_RAW + .type = QUIRK_MIDI_FASTLANE }, { .ifnum = 1, diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 1ecbe2391c8..4d0dd390aa5 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -2301,10 +2301,11 @@ int kvm_init(void *opaque, unsigned int vcpu_size, bad_pfn = page_to_pfn(bad_page); - if (!alloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) { + if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) { r = -ENOMEM; goto out_free_0; } + cpumask_clear(cpus_hardware_enabled); r = kvm_arch_hardware_setup(); if (r < 0)