Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6

commit 36432dae73
518 changed files with 20761 additions and 11393 deletions
@ -437,3 +437,10 @@ Why: Superseded by tdfxfb. I2C/DDC support used to live in a separate
driver but this caused driver conflicts.
Who: Jean Delvare <khali@linux-fr.org>
	Krzysztof Helt <krzysztof.h1@wp.pl>

---------------------------

What: CONFIG_RFKILL_INPUT
When: 2.6.33
Why: Should be implemented in userspace, policy daemon.
Who: Johannes Berg <johannes@sipsolutions.net>

@ -45,7 +45,7 @@ From then on, Kernel CAPI may call the registered callback functions for the
|
|||
device.
|
||||
|
||||
If the device becomes unusable for any reason (shutdown, disconnect ...), the
|
||||
driver has to call capi_ctr_reseted(). This will prevent further calls to the
|
||||
driver has to call capi_ctr_down(). This will prevent further calls to the
|
||||
callback functions by Kernel CAPI.
|
||||
|
||||
|
||||
|
@ -114,20 +114,36 @@ char *driver_name
|
|||
int (*load_firmware)(struct capi_ctr *ctrlr, capiloaddata *ldata)
|
||||
(optional) pointer to a callback function for sending firmware and
|
||||
configuration data to the device
|
||||
Return value: 0 on success, error code on error
|
||||
Called in process context.
|
||||
|
||||
void (*reset_ctr)(struct capi_ctr *ctrlr)
|
||||
pointer to a callback function for performing a reset on the device,
|
||||
releasing all registered applications
|
||||
(optional) pointer to a callback function for performing a reset on
|
||||
the device, releasing all registered applications
|
||||
Called in process context.
|
||||
|
||||
void (*register_appl)(struct capi_ctr *ctrlr, u16 applid,
|
||||
capi_register_params *rparam)
|
||||
void (*release_appl)(struct capi_ctr *ctrlr, u16 applid)
|
||||
pointers to callback functions for registration and deregistration of
|
||||
applications with the device
|
||||
Calls to these functions are serialized by Kernel CAPI so that only
|
||||
one call to any of them is active at any time.
|
||||
|
||||
u16 (*send_message)(struct capi_ctr *ctrlr, struct sk_buff *skb)
|
||||
pointer to a callback function for sending a CAPI message to the
|
||||
device
|
||||
Return value: CAPI error code
|
||||
If the method returns 0 (CAPI_NOERROR) the driver has taken ownership
|
||||
of the skb and the caller may no longer access it. If it returns a
|
||||
non-zero (error) value then ownership of the skb returns to the caller
|
||||
who may reuse or free it.
|
||||
The return value should only be used to signal problems with respect
|
||||
to accepting or queueing the message. Errors occurring during the
|
||||
actual processing of the message should be signaled with an
|
||||
appropriate reply message.
|
||||
Calls to this function are not serialized by Kernel CAPI, ie. it must
|
||||
be prepared to be re-entered.
|
||||
|
||||
char *(*procinfo)(struct capi_ctr *ctrlr)
|
||||
pointer to a callback function returning the entry for the device in
|
||||
|
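To make the callback contract above concrete, here is a minimal, hypothetical sketch of how a driver might wire these callbacks into its struct capi_ctr. Everything named my_* (and the outq queue) is invented for illustration; the callback names, signatures, serialization rules and the CAPI_NOERROR/skb-ownership semantics are the ones described above.

	/* Hypothetical driver glue -- only the callbacks discussed above are shown. */
	#include <linux/isdn/capilli.h>
	#include <linux/skbuff.h>

	struct my_card {
		struct capi_ctr ctr;
		struct sk_buff_head outq;	/* drained by the (not shown) hardware code */
	};

	/* Serialized by Kernel CAPI: only one (de)registration call is active at a time. */
	static void my_register_appl(struct capi_ctr *ctrlr, u16 applid,
				     capi_register_params *rparam)
	{
		/* tell the device about the new application */
	}

	static void my_release_appl(struct capi_ctr *ctrlr, u16 applid)
	{
		/* release everything the device holds for this application */
	}

	/* NOT serialized -- may be re-entered.  Return CAPI_NOERROR only after taking
	 * ownership of the skb; on a nonzero (error) return the caller keeps the skb. */
	static u16 my_send_message(struct capi_ctr *ctrlr, struct sk_buff *skb)
	{
		struct my_card *card = container_of(ctrlr, struct my_card, ctr);

		skb_queue_tail(&card->outq, skb);	/* we own the skb from here on */
		return CAPI_NOERROR;
	}

	static void my_card_setup(struct my_card *card)
	{
		skb_queue_head_init(&card->outq);
		card->ctr.driver_name   = "mycard";
		card->ctr.register_appl = my_register_appl;
		card->ctr.release_appl  = my_release_appl;
		card->ctr.send_message  = my_send_message;
		/* attach_capi_ctr(&card->ctr) and, once the device is usable,
		 * capi_ctr_ready(&card->ctr) -- see section 5 below. */
	}
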
@ -138,6 +154,8 @@ read_proc_t *ctr_read_proc
|
|||
system entry, /proc/capi/controllers/<n>; will be called with a
|
||||
pointer to the device's capi_ctr structure as the last (data) argument
|
||||
|
||||
Note: Callback functions are never called in interrupt context.
|
||||
|
||||
- to be filled in before calling capi_ctr_ready():
|
||||
|
||||
u8 manu[CAPI_MANUFACTURER_LEN]
|
||||
|
@ -153,6 +171,45 @@ u8 serial[CAPI_SERIAL_LEN]
|
|||
value to return for CAPI_GET_SERIAL
|
||||
|
||||
|
||||
4.3 The _cmsg Structure
|
||||
|
||||
(declared in <linux/isdn/capiutil.h>)
|
||||
|
||||
The _cmsg structure stores the contents of a CAPI 2.0 message in an easily
|
||||
accessible form. It contains members for all possible CAPI 2.0 parameters, of
|
||||
which only those appearing in the message type currently being processed are
|
||||
actually used. Unused members should be set to zero.
|
||||
|
||||
Members are named after the CAPI 2.0 standard names of the parameters they
|
||||
represent. See <linux/isdn/capiutil.h> for the exact spelling. Member data
|
||||
types are:
|
||||
|
||||
u8 for CAPI parameters of type 'byte'
|
||||
|
||||
u16 for CAPI parameters of type 'word'
|
||||
|
||||
u32 for CAPI parameters of type 'dword'
|
||||
|
||||
_cstruct for CAPI parameters of type 'struct' not containing any
|
||||
variably-sized (struct) subparameters (eg. 'Called Party Number')
|
||||
The member is a pointer to a buffer containing the parameter in
|
||||
CAPI encoding (length + content). It may also be NULL, which will
|
||||
be taken to represent an empty (zero length) parameter.
|
||||
|
||||
_cmstruct for CAPI parameters of type 'struct' containing 'struct'
|
||||
subparameters ('Additional Info' and 'B Protocol')
|
||||
The representation is a single byte containing one of the values:
|
||||
CAPI_DEFAULT: the parameter is empty
|
||||
CAPI_COMPOSE: the values of the subparameters are stored
|
||||
individually in the corresponding _cmsg structure members
|
||||
|
||||
Functions capi_cmsg2message() and capi_message2cmsg() are provided to convert
|
||||
messages between their transport encoding described in the CAPI 2.0 standard
|
||||
and their _cmsg structure representation. Note that capi_cmsg2message() does
|
||||
not know or check the size of its destination buffer. The caller must make
|
||||
sure it is big enough to accommodate the resulting CAPI message.
|
||||
|
||||
|
||||
5. Lower Layer Interface Functions
|
||||
|
||||
(declared in <linux/isdn/capilli.h>)
|
||||
|
@ -166,7 +223,7 @@ int detach_capi_ctr(struct capi_ctr *ctrlr)
|
|||
register/unregister a device (controller) with Kernel CAPI
|
||||
|
||||
void capi_ctr_ready(struct capi_ctr *ctrlr)
|
||||
void capi_ctr_reseted(struct capi_ctr *ctrlr)
|
||||
void capi_ctr_down(struct capi_ctr *ctrlr)
|
||||
signal controller ready/not ready
|
||||
|
||||
void capi_ctr_suspend_output(struct capi_ctr *ctrlr)
|
||||
|
@ -211,3 +268,32 @@ CAPIMSG_CONTROL(m) CAPIMSG_SETCONTROL(m, contr) Controller/PLCI/NCCI
|
|||
(u32)
|
||||
CAPIMSG_DATALEN(m) CAPIMSG_SETDATALEN(m, len) Data Length (u16)
|
||||
|
||||
|
||||
Library functions for working with _cmsg structures
|
||||
(from <linux/isdn/capiutil.h>):
|
||||
|
||||
unsigned capi_cmsg2message(_cmsg *cmsg, u8 *msg)
|
||||
Assembles a CAPI 2.0 message from the parameters in *cmsg, storing the
|
||||
result in *msg.
|
||||
|
||||
unsigned capi_message2cmsg(_cmsg *cmsg, u8 *msg)
|
||||
Disassembles the CAPI 2.0 message in *msg, storing the parameters in
|
||||
*cmsg.
|
||||
|
||||
unsigned capi_cmsg_header(_cmsg *cmsg, u16 ApplId, u8 Command, u8 Subcommand,
|
||||
u16 Messagenumber, u32 Controller)
|
||||
Fills the header part and address field of the _cmsg structure *cmsg
|
||||
with the given values, zeroing the remainder of the structure so only
|
||||
parameters with non-default values need to be changed before sending
|
||||
the message.
|
||||
|
||||
void capi_cmsg_answer(_cmsg *cmsg)
|
||||
Sets the low bit of the Subcommand field in *cmsg, thereby converting
|
||||
_REQ to _CONF and _IND to _RESP.
|
||||
|
||||
char *capi_cmd2str(u8 Command, u8 Subcommand)
|
||||
Returns the CAPI 2.0 message name corresponding to the given command
|
||||
and subcommand values, as a static ASCII string. The return value may
|
||||
be NULL if the command/subcommand is not one of those defined in the
|
||||
CAPI 2.0 standard.
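As a small worked example of the helpers listed above (and of the _cmsg structure from section 4.3), the following hedged sketch builds a CONNECT_REQ. The CAPI_CONNECT/CAPI_REQ constants and the exact _cmsg member spellings are taken from <linux/isdn/capicmd.h> and <linux/isdn/capiutil.h> respectively; treat them as assumptions to be checked against those headers.

	/* Sketch only: build a CONNECT_REQ with the capiutil helpers described above. */
	#include <linux/kernel.h>
	#include <linux/isdn/capicmd.h>
	#include <linux/isdn/capiutil.h>

	static void example_build_connect_req(u16 applid, u32 contr, u16 msgnum)
	{
		_cmsg cmsg;
		u8 msg[128];	/* assumed big enough: capi_cmsg2message() does not
				 * check the size of its destination buffer */

		/* Fill header and address field, zero every other parameter. */
		capi_cmsg_header(&cmsg, applid, CAPI_CONNECT, CAPI_REQ, msgnum, contr);

		cmsg.CIPValue = 1;		/* a 'word' (u16) parameter */
		cmsg.CalledPartyNumber = NULL;	/* NULL == empty 'struct' parameter */

		capi_cmsg2message(&cmsg, msg);	/* encode into CAPI 2.0 transport format */

		pr_info("built %s\n", capi_cmd2str(CAPI_CONNECT, CAPI_REQ));
		/* capi_cmsg_answer(&cmsg) would now turn this _REQ into a _CONF. */
	}
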
76	Documentation/networking/ieee802154.txt	(new file)

@ -0,0 +1,76 @@

	Linux IEEE 802.15.4 implementation


Introduction
============

The Linux-ZigBee project aims to provide a complete implementation
of the IEEE 802.15.4 / ZigBee / 6LoWPAN protocols. IEEE 802.15.4 is a stack
of protocols for organizing Low-Rate Wireless Personal Area Networks.

Currently only the IEEE 802.15.4 layer is implemented. We have chosen
to use the plain Berkeley socket API, the generic Linux networking stack
to transfer IEEE 802.15.4 messages, and a special protocol over genetlink
for configuration/management.


Socket API
==========

int sd = socket(PF_IEEE802154, SOCK_DGRAM, 0);
.....

The address family, socket addresses etc. are defined in the
include/net/ieee802154/af_ieee802154.h header or in the special header
in our userspace package (see either the linux-zigbee sourceforge download page
or the git tree at git://linux-zigbee.git.sourceforge.net/gitroot/linux-zigbee).

One can use SOCK_RAW for passing raw data towards the device xmit function. YMMV.
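A minimal, self-contained userspace sketch of the datagram socket shown above follows. The PF_IEEE802154 fallback value and the omitted sockaddr details are assumptions; the real definitions live in af_ieee802154.h (or the userspace package header mentioned above).

	/* Sketch: open an IEEE 802.15.4 datagram socket as described above. */
	#include <stdio.h>
	#include <sys/socket.h>
	#include <unistd.h>

	#ifndef PF_IEEE802154
	#define PF_IEEE802154 36	/* assumed value; normally comes from the headers */
	#endif

	int main(void)
	{
		int sd = socket(PF_IEEE802154, SOCK_DGRAM, 0);

		if (sd < 0) {
			perror("socket(PF_IEEE802154, SOCK_DGRAM)");
			return 1;
		}

		/* bind()/connect() would use struct sockaddr_ieee802154 from
		 * af_ieee802154.h (PAN id plus short or extended hardware address);
		 * its layout is not repeated here. */

		const char payload[] = "hello";
		if (send(sd, payload, sizeof(payload), 0) < 0)
			perror("send");	/* fails unless the socket was connected first */

		close(sd);
		return 0;
	}
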

MLME - MAC Level Management
============================

Most of the IEEE 802.15.4 MLME interfaces are directly mapped onto netlink commands.
See the include/net/ieee802154/nl802154.h header. Our userspace tools package
(see above) provides a CLI configuration utility for radio interfaces and a simple
coordinator for IEEE 802.15.4 networks as example users of the MLME protocol.


Kernel side
=============

As with WiFi, there are several types of devices implementing IEEE 802.15.4.
1) 'HardMAC'. The MAC layer is implemented in the device itself; the device
   exports an MLME and data API.
2) 'SoftMAC' or just radio. These devices are plain radio transceivers,
   possibly with some kinds of acceleration such as automatic CRC computation and
   comparison, automagic ACK handling, address matching, etc.

These types of devices require different approaches to be hooked into the Linux kernel.


HardMAC
=======

See the header include/net/ieee802154/netdevice.h. You have to implement a Linux
net_device with .type = ARPHRD_IEEE802154. Data is exchanged with the socket family
code via plain sk_buffs. The control block of the sk_buffs will contain additional
info as described in struct ieee802154_mac_cb.

To hook up the MLME interface you have to populate the ml_priv field of your
net_device with a pointer to a struct ieee802154_mlme_ops instance. All fields are
required.

We provide an example of a simple HardMAC driver at drivers/ieee802154/fakehard.c


SoftMAC
=======

We are going to provide an intermediate layer implementing the IEEE 802.15.4 MAC
in software. This is currently a work in progress.

See the header include/net/ieee802154/mac802154.h and several drivers in
drivers/ieee802154/

@ -1057,6 +1057,13 @@ disable_ipv6 - BOOLEAN
|
|||
address.
|
||||
Default: FALSE (enable IPv6 operation)
|
||||
|
||||
When this value is changed from 1 to 0 (IPv6 is being enabled),
|
||||
it will dynamically create a link-local address on the given
|
||||
interface and start Duplicate Address Detection, if necessary.
|
||||
|
||||
When this value is changed from 0 to 1 (IPv6 is being disabled),
|
||||
it will dynamically delete all addresses on the given interface.
|
||||
|
||||
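As a userspace illustration of the disable_ipv6 setting just described, here is a hedged sketch; "eth0" is only an example interface name, and the proc path follows the per-interface conf layout this file documents.

	/* Sketch: toggle the per-interface disable_ipv6 knob described above. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	static int set_disable_ipv6(const char *ifname, int disable)
	{
		char path[128];
		char val = disable ? '1' : '0';
		int fd;

		snprintf(path, sizeof(path),
			 "/proc/sys/net/ipv6/conf/%s/disable_ipv6", ifname);
		fd = open(path, O_WRONLY);
		if (fd < 0)
			return -1;
		if (write(fd, &val, 1) != 1) {
			close(fd);
			return -1;
		}
		return close(fd);
	}

	int main(void)
	{
		/* Changing 1 -> 0 re-enables IPv6 and, as described above, creates a
		 * link-local address and starts DAD on the interface. */
		return set_disable_ipv6("eth0", 0) ? 1 : 0;
	}
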
accept_dad - INTEGER
|
||||
Whether to accept DAD (Duplicate Address Detection).
|
||||
0: Disable DAD
|
||||
|
|
|
@ -33,3 +33,40 @@ disable
|
|||
|
||||
A reboot is required to enable IPv6.
|
||||
|
||||
autoconf
|
||||
|
||||
Specifies whether to enable IPv6 address autoconfiguration
|
||||
on all interfaces. This might be used when one does not wish
|
||||
for addresses to be automatically generated from prefixes
|
||||
received in Router Advertisements.
|
||||
|
||||
The possible values and their effects are:
|
||||
|
||||
0
|
||||
IPv6 address autoconfiguration is disabled on all interfaces.
|
||||
|
||||
Only the IPv6 loopback address (::1) and link-local addresses
|
||||
will be added to interfaces.
|
||||
|
||||
1
|
||||
IPv6 address autoconfiguration is enabled on all interfaces.
|
||||
|
||||
This is the default value.
|
||||
|
||||
disable_ipv6
|
||||
|
||||
Specifies whether to disable IPv6 on all interfaces.
|
||||
This might be used when no IPv6 addresses are desired.
|
||||
|
||||
The possible values and their effects are:
|
||||
|
||||
0
|
||||
IPv6 is enabled on all interfaces.
|
||||
|
||||
This is the default value.
|
||||
|
||||
1
|
||||
IPv6 is disabled on all interfaces.
|
||||
|
||||
No IPv6 addresses will be added to interfaces.
|
||||
|
||||
|
|
53	Documentation/powerpc/dts-bindings/can/sja1000.txt	(new file)
|
@ -0,0 +1,53 @@
|
|||
Memory mapped SJA1000 CAN controller from NXP (formerly Philips)
|
||||
|
||||
Required properties:
|
||||
|
||||
- compatible : should be "nxp,sja1000".
|
||||
|
||||
- reg : should specify the chip select, address offset and size required
|
||||
to map the registers of the SJA1000. The size is usually 0x80.
|
||||
|
||||
- interrupts: property with a value describing the interrupt source
|
||||
(number and sensitivity) required for the SJA1000.
|
||||
|
||||
Optional properties:
|
||||
|
||||
- nxp,external-clock-frequency : Frequency of the external oscillator
|
||||
clock in Hz. Note that the internal clock frequency used by the
|
||||
SJA1000 is half of that value. If not specified, a default value
|
||||
of 16000000 (16 MHz) is used.
|
||||
|
||||
- nxp,tx-output-mode : operation mode of the TX output control logic:
|
||||
<0x0> : bi-phase output mode
|
||||
<0x1> : normal output mode (default)
|
||||
<0x2> : test output mode
|
||||
<0x3> : clock output mode
|
||||
|
||||
- nxp,tx-output-config : TX output pin configuration:
|
||||
<0x01> : TX0 invert
|
||||
<0x02> : TX0 pull-down (default)
|
||||
<0x04> : TX0 pull-up
|
||||
<0x06> : TX0 push-pull
|
||||
<0x08> : TX1 invert
|
||||
<0x10> : TX1 pull-down
|
||||
<0x20> : TX1 pull-up
|
||||
<0x30> : TX1 push-pull
|
||||
|
||||
- nxp,clock-out-frequency : clock frequency in Hz on the CLKOUT pin.
|
||||
If not specified or if the specified value is 0, the CLKOUT pin
|
||||
will be disabled.
|
||||
|
||||
- nxp,no-comparator-bypass : Allows disabling the CAN input comparator.
|
||||
|
||||
For further information, please refer to the SJA1000 data sheet.
|
||||
|
||||
Examples:
|
||||
|
||||
can@3,100 {
|
||||
compatible = "nxp,sja1000";
|
||||
reg = <3 0x100 0x80>;
|
||||
interrupts = <2 0>;
|
||||
interrupt-parent = <&mpic>;
|
||||
nxp,external-clock-frequency = <16000000>;
|
||||
};
|
||||
|
|
@ -1,571 +1,136 @@
|
|||
rfkill - RF switch subsystem support
|
||||
====================================
|
||||
rfkill - RF kill switch support
|
||||
===============================
|
||||
|
||||
1 Introduction
|
||||
2 Implementation details
|
||||
3 Kernel driver guidelines
|
||||
3.1 wireless device drivers
|
||||
3.2 platform/switch drivers
|
||||
3.3 input device drivers
|
||||
4 Kernel API
|
||||
5 Userspace support
|
||||
1. Introduction
|
||||
2. Implementation details
|
||||
3. Kernel driver guidelines
|
||||
4. Kernel API
|
||||
5. Userspace support
|
||||
|
||||
|
||||
1. Introduction:
|
||||
1. Introduction
|
||||
|
||||
The rfkill switch subsystem exists to add a generic interface to circuitry that
|
||||
can enable or disable the signal output of a wireless *transmitter* of any
|
||||
type. By far, the most common use is to disable radio-frequency transmitters.
|
||||
The rfkill subsystem provides a generic interface to disabling any radio
|
||||
transmitter in the system. When a transmitter is blocked, it shall not
|
||||
radiate any power.
|
||||
|
||||
Note that disabling the signal output means that the transmitter is to be
|
||||
made to not emit any energy when "blocked". rfkill is not about blocking data
|
||||
transmissions, it is about blocking energy emission.
|
||||
The subsystem also provides the ability to react on button presses and
|
||||
disable all transmitters of a certain type (or all). This is intended for
|
||||
situations where transmitters need to be turned off, for example on
|
||||
aircraft.
|
||||
|
||||
The rfkill subsystem offers support for keys and switches often found on
|
||||
laptops to enable wireless devices like WiFi and Bluetooth, so that these keys
|
||||
and switches actually perform an action in all wireless devices of a given type
|
||||
attached to the system.
|
||||
|
||||
The buttons to enable and disable the wireless transmitters are important in
|
||||
situations where the user is for example using his laptop on a location where
|
||||
radio-frequency transmitters _must_ be disabled (e.g. airplanes).
|
||||
|
||||
Because of this requirement, userspace support for the keys should not be made
|
||||
mandatory. Because userspace might want to perform some additional smarter
|
||||
tasks when the key is pressed, rfkill provides userspace the possibility to
|
||||
take over the task to handle the key events.
|
||||
|
||||
===============================================================================
|
||||
2: Implementation details
|
||||
2. Implementation details
|
||||
|
||||
The rfkill subsystem is composed of various components: the rfkill class, the
|
||||
rfkill-input module (an input layer handler), and some specific input layer
|
||||
events.
|
||||
|
||||
The rfkill class provides kernel drivers with an interface that allows them to
|
||||
know when they should enable or disable a wireless network device transmitter.
|
||||
This is enabled by the CONFIG_RFKILL Kconfig option.
|
||||
The rfkill class is provided for kernel drivers to register their radio
|
||||
transmitter with the kernel, provide methods for turning it on and off and,
|
||||
optionally, letting the system know about hardware-disabled states that may
|
||||
be implemented on the device. This code is enabled with the CONFIG_RFKILL
|
||||
Kconfig option, which drivers can "select".
|
||||
|
||||
The rfkill class support makes sure userspace will be notified of all state
|
||||
changes on rfkill devices through uevents. It provides a notification chain
|
||||
for interested parties in the kernel to also get notified of rfkill state
|
||||
changes in other drivers. It creates several sysfs entries which can be used
|
||||
by userspace. See section "Userspace support".
|
||||
The rfkill class code also notifies userspace of state changes, this is
|
||||
achieved via uevents. It also provides some sysfs files for userspace to
|
||||
check the status of radio transmitters. See the "Userspace support" section
|
||||
below.
|
||||
|
||||
The rfkill-input module provides the kernel with the ability to implement a
|
||||
basic response when the user presses a key or button (or toggles a switch)
|
||||
related to rfkill functionality. It is an in-kernel implementation of default
|
||||
policy of reacting to rfkill-related input events and neither mandatory nor
|
||||
required for wireless drivers to operate. It is enabled by the
|
||||
CONFIG_RFKILL_INPUT Kconfig option.
|
||||
|
||||
rfkill-input is a rfkill-related events input layer handler. This handler will
|
||||
listen to all rfkill key events and will change the rfkill state of the
|
||||
wireless devices accordingly. With this option enabled userspace could either
|
||||
do nothing or simply perform monitoring tasks.
|
||||
The rfkill-input code implements a basic response to rfkill buttons -- it
|
||||
implements turning on/off all devices of a certain class (or all).
|
||||
|
||||
The rfkill-input module also provides EPO (emergency power-off) functionality
|
||||
for all wireless transmitters. This function cannot be overridden, and it is
|
||||
always active. rfkill EPO is related to *_RFKILL_ALL input layer events.
|
||||
When the device is hard-blocked (either by a call to rfkill_set_hw_state()
|
||||
or from query_hw_block) set_block() will be invoked but drivers can well
|
||||
ignore the method call since they can use the return value of the function
|
||||
rfkill_set_hw_state() to sync the software state instead of keeping track
|
||||
of calls to set_block().
|
||||
|
||||
|
||||
Important terms for the rfkill subsystem:
|
||||
The entire functionality is spread over more than one subsystem:
|
||||
|
||||
In order to avoid confusion, we avoid the term "switch" in rfkill when it is
|
||||
referring to an electronic control circuit that enables or disables a
|
||||
transmitter. We reserve it for the physical device a human manipulates
|
||||
(which is an input device, by the way):
|
||||
* The kernel input layer generates KEY_WWAN, KEY_WLAN etc. and
|
||||
SW_RFKILL_ALL -- when the user presses a button. Drivers for radio
|
||||
transmitters generally do not register to the input layer, unless the
|
||||
device really provides an input device (i.e. a button that has no
|
||||
effect other than generating a button press event)
|
||||
|
||||
rfkill switch:
|
||||
* The rfkill-input code hooks up to these events and switches the soft-block
|
||||
of the various radio transmitters, depending on the button type.
|
||||
|
||||
A physical device a human manipulates. Its state can be perceived by
|
||||
the kernel either directly (through a GPIO pin, ACPI GPE) or by its
|
||||
effect on a rfkill line of a wireless device.
|
||||
* The rfkill drivers turn off/on their transmitters as requested.
|
||||
|
||||
rfkill controller:
|
||||
* The rfkill class will generate userspace notifications (uevents) to tell
|
||||
userspace what the current state is.
|
||||
|
||||
A hardware circuit that controls the state of a rfkill line, which a
|
||||
kernel driver can interact with *to modify* that state (i.e. it has
|
||||
either write-only or read/write access).
|
||||
|
||||
rfkill line:
|
||||
|
||||
An input channel (hardware or software) of a wireless device, which
|
||||
causes a wireless transmitter to stop emitting energy (BLOCK) when it
|
||||
is active. Point of view is extremely important here: rfkill lines are
|
||||
always seen from the PoV of a wireless device (and its driver).
|
||||
3. Kernel driver guidelines
|
||||
|
||||
soft rfkill line/software rfkill line:
|
||||
|
||||
A rfkill line the wireless device driver can directly change the state
|
||||
of. Related to rfkill_state RFKILL_STATE_SOFT_BLOCKED.
|
||||
Drivers for radio transmitters normally implement only the rfkill class.
|
||||
These drivers may not unblock the transmitter based on own decisions, they
|
||||
should act on information provided by the rfkill class only.
|
||||
|
||||
hard rfkill line/hardware rfkill line:
|
||||
Platform drivers might implement input devices if the rfkill button is just
|
||||
that, a button. If that button influences the hardware then you need to
|
||||
implement an rfkill class instead. This also applies if the platform provides
|
||||
a way to turn on/off the transmitter(s).
|
||||
|
||||
A rfkill line that works fully in hardware or firmware, and that cannot
|
||||
be overridden by the kernel driver. The hardware device or the
|
||||
firmware just exports its status to the driver, but it is read-only.
|
||||
Related to rfkill_state RFKILL_STATE_HARD_BLOCKED.
|
||||
During suspend/hibernation, transmitters should only be left enabled when
|
||||
wake-on wlan or similar functionality requires it and the device wasn't
|
||||
blocked before suspend/hibernate. Note that it may be necessary to update
|
||||
the rfkill subsystem's idea of what the current state is at resume time if
|
||||
the state may have changed over suspend.
|
||||
|
||||
The enum rfkill_state describes the rfkill state of a transmitter:
|
||||
|
||||
When a rfkill line or rfkill controller is in the RFKILL_STATE_UNBLOCKED state,
|
||||
the wireless transmitter (radio TX circuit for example) is *enabled*. When the
|
||||
it is in the RFKILL_STATE_SOFT_BLOCKED or RFKILL_STATE_HARD_BLOCKED, the
|
||||
wireless transmitter is to be *blocked* from operating.
|
||||
|
||||
RFKILL_STATE_SOFT_BLOCKED indicates that a call to toggle_radio() can change
|
||||
that state. RFKILL_STATE_HARD_BLOCKED indicates that a call to toggle_radio()
|
||||
will not be able to change the state and will return with a suitable error if
|
||||
attempts are made to set the state to RFKILL_STATE_UNBLOCKED.
|
||||
|
||||
RFKILL_STATE_HARD_BLOCKED is used by drivers to signal that the device is
|
||||
locked in the BLOCKED state by a hardwire rfkill line (typically an input pin
|
||||
that, when active, forces the transmitter to be disabled) which the driver
|
||||
CANNOT override.
|
||||
|
||||
Full rfkill functionality requires two different subsystems to cooperate: the
|
||||
input layer and the rfkill class. The input layer issues *commands* to the
|
||||
entire system requesting that devices registered to the rfkill class change
|
||||
state. The way this interaction happens is not complex, but it is not obvious
|
||||
either:
|
||||
|
||||
Kernel Input layer:
|
||||
|
||||
* Generates KEY_WWAN, KEY_WLAN, KEY_BLUETOOTH, SW_RFKILL_ALL, and
|
||||
other such events when the user presses certain keys, buttons, or
|
||||
toggles certain physical switches.
|
||||
|
||||
THE INPUT LAYER IS NEVER USED TO PROPAGATE STATUS, NOTIFICATIONS OR THE
|
||||
KIND OF STUFF AN ON-SCREEN-DISPLAY APPLICATION WOULD REPORT. It is
|
||||
used to issue *commands* for the system to change behaviour, and these
|
||||
commands may or may not be carried out by some kernel driver or
|
||||
userspace application. It follows that doing user feedback based only
|
||||
on input events is broken, as there is no guarantee that an input event
|
||||
will be acted upon.
|
||||
|
||||
Most wireless communication device drivers implementing rfkill
|
||||
functionality MUST NOT generate these events, and have no reason to
|
||||
register themselves with the input layer. Doing otherwise is a common
|
||||
misconception. There is an API to propagate rfkill status change
|
||||
information, and it is NOT the input layer.
|
||||
|
||||
rfkill class:
|
||||
|
||||
* Calls a hook in a driver to effectively change the wireless
|
||||
transmitter state;
|
||||
* Keeps track of the wireless transmitter state (with help from
|
||||
the driver);
|
||||
* Generates userspace notifications (uevents) and a call to a
|
||||
notification chain (kernel) when there is a wireless transmitter
|
||||
state change;
|
||||
* Connects a wireless communications driver with the common rfkill
|
||||
control system, which, for example, allows actions such as
|
||||
"switch all bluetooth devices offline" to be carried out by
|
||||
userspace or by rfkill-input.
|
||||
|
||||
THE RFKILL CLASS NEVER ISSUES INPUT EVENTS. THE RFKILL CLASS DOES
|
||||
NOT LISTEN TO INPUT EVENTS. NO DRIVER USING THE RFKILL CLASS SHALL
|
||||
EVER LISTEN TO, OR ACT ON RFKILL INPUT EVENTS. Doing otherwise is
|
||||
a layering violation.
|
||||
|
||||
Most wireless data communication drivers in the kernel have just to
|
||||
implement the rfkill class API to work properly. Interfacing to the
|
||||
input layer is not often required (and is very often a *bug*) on
|
||||
wireless drivers.
|
||||
|
||||
Platform drivers often have to attach to the input layer to *issue*
|
||||
(but never to listen to) rfkill events for rfkill switches, and also to
|
||||
the rfkill class to export a control interface for the platform rfkill
|
||||
controllers to the rfkill subsystem. This does NOT mean the rfkill
|
||||
switch is attached to a rfkill class (doing so is almost always wrong).
|
||||
It just means the same kernel module is the driver for different
|
||||
devices (rfkill switches and rfkill controllers).
|
||||
|
||||
|
||||
Userspace input handlers (uevents) or kernel input handlers (rfkill-input):
|
||||
|
||||
* Implements the policy of what should happen when one of the input
|
||||
layer events related to rfkill operation is received.
|
||||
* Uses the sysfs interface (userspace) or private rfkill API calls
|
||||
to tell the devices registered with the rfkill class to change
|
||||
their state (i.e. translates the input layer event into real
|
||||
action).
|
||||
|
||||
* rfkill-input implements EPO by handling EV_SW SW_RFKILL_ALL 0
|
||||
(power off all transmitters) in a special way: it ignores any
|
||||
overrides and local state cache and forces all transmitters to the
|
||||
RFKILL_STATE_SOFT_BLOCKED state (including those which are already
|
||||
supposed to be BLOCKED).
|
||||
* rfkill EPO will remain active until rfkill-input receives an
|
||||
EV_SW SW_RFKILL_ALL 1 event. While the EPO is active, transmitters
|
||||
are locked in the blocked state (rfkill will refuse to unblock them).
|
||||
* rfkill-input implements different policies that the user can
|
||||
select for handling EV_SW SW_RFKILL_ALL 1. It will unlock rfkill,
|
||||
and either do nothing (leave transmitters blocked, but now unlocked),
|
||||
restore the transmitters to their state before the EPO, or unblock
|
||||
them all.
|
||||
|
||||
Userspace uevent handler or kernel platform-specific drivers hooked to the
|
||||
rfkill notifier chain:
|
||||
|
||||
* Taps into the rfkill notifier chain or to KOBJ_CHANGE uevents,
|
||||
in order to know when a device that is registered with the rfkill
|
||||
class changes state;
|
||||
* Issues feedback notifications to the user;
|
||||
* In the rare platforms where this is required, synthesizes an input
|
||||
event to command all *OTHER* rfkill devices to also change their
|
||||
statues when a specific rfkill device changes state.
|
||||
|
||||
|
||||
===============================================================================
|
||||
3: Kernel driver guidelines
|
||||
|
||||
Remember: point-of-view is everything for a driver that connects to the rfkill
|
||||
subsystem. All the details below must be measured/perceived from the point of
|
||||
view of the specific driver being modified.
|
||||
|
||||
The first thing one needs to know is whether his driver should be talking to
|
||||
the rfkill class or to the input layer. In rare cases (platform drivers), it
|
||||
could happen that you need to do both, as platform drivers often handle a
|
||||
variety of devices in the same driver.
|
||||
|
||||
Do not mistake input devices for rfkill controllers. The only type of "rfkill
|
||||
switch" device that is to be registered with the rfkill class are those
|
||||
directly controlling the circuits that cause a wireless transmitter to stop
|
||||
working (or the software equivalent of them), i.e. what we call a rfkill
|
||||
controller. Every other kind of "rfkill switch" is just an input device and
|
||||
MUST NOT be registered with the rfkill class.
|
||||
|
||||
A driver should register a device with the rfkill class when ALL of the
|
||||
following conditions are met (they define a rfkill controller):
|
||||
|
||||
1. The device is/controls a data communications wireless transmitter;
|
||||
|
||||
2. The kernel can interact with the hardware/firmware to CHANGE the wireless
|
||||
transmitter state (block/unblock TX operation);
|
||||
|
||||
3. The transmitter can be made to not emit any energy when "blocked":
|
||||
rfkill is not about blocking data transmissions, it is about blocking
|
||||
energy emission;
|
||||
|
||||
A driver should register a device with the input subsystem to issue
|
||||
rfkill-related events (KEY_WLAN, KEY_BLUETOOTH, KEY_WWAN, KEY_WIMAX,
|
||||
SW_RFKILL_ALL, etc) when ALL of the following conditions are met:
|
||||
|
||||
1. It is directly related to some physical device the user interacts with, to
|
||||
command the O.S./firmware/hardware to enable/disable a data communications
|
||||
wireless transmitter.
|
||||
|
||||
Examples of the physical device are: buttons, keys and switches the user
|
||||
will press/touch/slide/switch to enable or disable the wireless
|
||||
communication device.
|
||||
|
||||
2. It is NOT slaved to another device, i.e. there is no other device that
|
||||
issues rfkill-related input events in preference to this one.
|
||||
|
||||
Please refer to the corner cases and examples section for more details.
|
||||
|
||||
When in doubt, do not issue input events. For drivers that should generate
|
||||
input events in some platforms, but not in others (e.g. b43), the best solution
|
||||
is to NEVER generate input events in the first place. That work should be
|
||||
deferred to a platform-specific kernel module (which will know when to generate
|
||||
events through the rfkill notifier chain) or to userspace. This avoids the
|
||||
usual maintenance problems with DMI whitelisting.
|
||||
|
||||
|
||||
Corner cases and examples:
|
||||
====================================
|
||||
|
||||
1. If the device is an input device that, because of hardware or firmware,
|
||||
causes wireless transmitters to be blocked regardless of the kernel's will, it
|
||||
is still just an input device, and NOT to be registered with the rfkill class.
|
||||
|
||||
2. If the wireless transmitter switch control is read-only, it is an input
|
||||
device and not to be registered with the rfkill class (and maybe not to be made
|
||||
an input layer event source either, see below).
|
||||
|
||||
3. If there is some other device driver *closer* to the actual hardware the
|
||||
user interacted with (the button/switch/key) to issue an input event, THAT is
|
||||
the device driver that should be issuing input events.
|
||||
|
||||
E.g:
|
||||
[RFKILL slider switch] -- [GPIO hardware] -- [WLAN card rf-kill input]
|
||||
(platform driver) (wireless card driver)
|
||||
|
||||
The user is closer to the RFKILL slide switch platform driver, so the driver
|
||||
which must issue input events is the platform driver looking at the GPIO
|
||||
hardware, and NEVER the wireless card driver (which is just a slave). It is
|
||||
very likely that there are other leaves than just the WLAN card rf-kill input
|
||||
(e.g. a bluetooth card, etc)...
|
||||
|
||||
On the other hand, some embedded devices do this:
|
||||
|
||||
[RFKILL slider switch] -- [WLAN card rf-kill input]
|
||||
(wireless card driver)
|
||||
|
||||
In this situation, the wireless card driver *could* register itself as an input
|
||||
device and issue rf-kill related input events... but in order to AVOID the need
|
||||
for DMI whitelisting, the wireless card driver does NOT do it. Userspace (HAL)
|
||||
or a platform driver (that exists only on these embedded devices) will do the
|
||||
dirty job of issuing the input events.
|
||||
|
||||
|
||||
COMMON MISTAKES in kernel drivers, related to rfkill:
|
||||
====================================
|
||||
|
||||
1. NEVER confuse input device keys and buttons with input device switches.
|
||||
|
||||
1a. Switches are always set or reset. They report the current state
|
||||
(on position or off position).
|
||||
|
||||
1b. Keys and buttons are either in the pressed or not-pressed state, and
|
||||
that's it. A "button" that latches down when you press it, and
|
||||
unlatches when you press it again is in fact a switch as far as input
|
||||
devices go.
|
||||
|
||||
Add the SW_* events you need for switches, do NOT try to emulate a button using
|
||||
KEY_* events just because there is no such SW_* event yet. Do NOT try to use,
|
||||
for example, KEY_BLUETOOTH when you should be using SW_BLUETOOTH instead.
|
||||
|
||||
2. Input device switches (sources of EV_SW events) DO store their current state
|
||||
(so you *must* initialize it by issuing a gratuitous input layer event on
|
||||
driver start-up and also when resuming from sleep), and that state CAN be
|
||||
queried from userspace through IOCTLs. There is no sysfs interface for this,
|
||||
but that doesn't mean you should break things trying to hook it to the rfkill
|
||||
class to get a sysfs interface :-)
|
||||
|
||||
3. Do not issue *_RFKILL_ALL events by default, unless you are sure it is the
|
||||
correct event for your switch/button. These events are emergency power-off
|
||||
events when they are trying to turn the transmitters off. An example of an
|
||||
input device which SHOULD generate *_RFKILL_ALL events is the wireless-kill
|
||||
switch in a laptop which is NOT a hotkey, but a real sliding/rocker switch.
|
||||
An example of an input device which SHOULD NOT generate *_RFKILL_ALL events by
|
||||
default, is any sort of hot key that is type-specific (e.g. the one for WLAN).
|
||||
|
||||
|
||||
3.1 Guidelines for wireless device drivers
|
||||
------------------------------------------
|
||||
|
||||
(in this text, rfkill->foo means the foo field of struct rfkill).
|
||||
|
||||
1. Each independent transmitter in a wireless device (usually there is only one
|
||||
transmitter per device) should have a SINGLE rfkill class attached to it.
|
||||
|
||||
2. If the device does not have any sort of hardware assistance to allow the
|
||||
driver to rfkill the device, the driver should emulate it by taking all actions
|
||||
required to silence the transmitter.
|
||||
|
||||
3. If it is impossible to silence the transmitter (i.e. it still emits energy,
|
||||
even if it is just in brief pulses, when there is no data to transmit and there
|
||||
is no hardware support to turn it off) do NOT lie to the users. Do not attach
|
||||
it to a rfkill class. The rfkill subsystem does not deal with data
|
||||
transmission, it deals with energy emission. If the transmitter is emitting
|
||||
energy, it is not blocked in rfkill terms.
|
||||
|
||||
4. It doesn't matter if the device has multiple rfkill input lines affecting
|
||||
the same transmitter, their combined state is to be exported as a single state
|
||||
per transmitter (see rule 1).
|
||||
|
||||
This rule exists because users of the rfkill subsystem expect to get (and set,
|
||||
when possible) the overall transmitter rfkill state, not of a particular rfkill
|
||||
line.
|
||||
|
||||
5. The wireless device driver MUST NOT leave the transmitter enabled during
|
||||
suspend and hibernation unless:
|
||||
|
||||
5.1. The transmitter has to be enabled for some sort of functionality
|
||||
like wake-on-wireless-packet or autonomous packet forwarding in a mesh
|
||||
network, and that functionality is enabled for this suspend/hibernation
|
||||
cycle.
|
||||
|
||||
AND
|
||||
|
||||
5.2. The device was not on a user-requested BLOCKED state before
|
||||
the suspend (i.e. the driver must NOT unblock a device, not even
|
||||
to support wake-on-wireless-packet or remain in the mesh).
|
||||
|
||||
In other words, there is absolutely no allowed scenario where a driver can
|
||||
automatically take action to unblock a rfkill controller (obviously, this deals
|
||||
with scenarios where soft-blocking or both soft and hard blocking is happening.
|
||||
Scenarios where hardware rfkill lines are the only ones blocking the
|
||||
transmitter are outside of this rule, since the wireless device driver does not
|
||||
control its input hardware rfkill lines in the first place).
|
||||
|
||||
6. During resume, rfkill will try to restore its previous state.
|
||||
|
||||
7. After a rfkill class is suspended, it will *not* call rfkill->toggle_radio
|
||||
until it is resumed.
|
||||
|
||||
|
||||
Example of a WLAN wireless driver connected to the rfkill subsystem:
|
||||
--------------------------------------------------------------------
|
||||
|
||||
A certain WLAN card has one input pin that causes it to block the transmitter
|
||||
and makes the status of that input pin available (only for reading!) to the
|
||||
kernel driver. This is a hard rfkill input line (it cannot be overridden by
|
||||
the kernel driver).
|
||||
|
||||
The card also has one PCI register that, if manipulated by the driver, causes
|
||||
it to block the transmitter. This is a soft rfkill input line.
|
||||
|
||||
It has also a thermal protection circuitry that shuts down its transmitter if
|
||||
the card overheats, and makes the status of that protection available (only for
|
||||
reading!) to the kernel driver. This is also a hard rfkill input line.
|
||||
|
||||
If either one of these rfkill lines are active, the transmitter is blocked by
|
||||
the hardware and forced offline.
|
||||
|
||||
The driver should allocate and attach to its struct device *ONE* instance of
|
||||
the rfkill class (there is only one transmitter).
|
||||
|
||||
It can implement the get_state() hook, and return RFKILL_STATE_HARD_BLOCKED if
|
||||
either one of its two hard rfkill input lines are active. If the two hard
|
||||
rfkill lines are inactive, it must return RFKILL_STATE_SOFT_BLOCKED if its soft
|
||||
rfkill input line is active. Only if none of the rfkill input lines are
|
||||
active, will it return RFKILL_STATE_UNBLOCKED.
|
||||
|
||||
Since the device has a hardware rfkill line, it IS subject to state changes
|
||||
external to rfkill. Therefore, the driver must make sure that it calls
|
||||
rfkill_force_state() to keep the status always up-to-date, and it must do a
|
||||
rfkill_force_state() on resume from sleep.
|
||||
|
||||
Every time the driver gets a notification from the card that one of its rfkill
|
||||
lines changed state (polling might be needed on badly designed cards that don't
|
||||
generate interrupts for such events), it recomputes the rfkill state as per
|
||||
above, and calls rfkill_force_state() to update it.
|
||||
|
||||
The driver should implement the toggle_radio() hook, that:
|
||||
|
||||
1. Returns an error if one of the hardware rfkill lines are active, and the
|
||||
caller asked for RFKILL_STATE_UNBLOCKED.
|
||||
|
||||
2. Activates the soft rfkill line if the caller asked for state
|
||||
RFKILL_STATE_SOFT_BLOCKED. It should do this even if one of the hard rfkill
|
||||
lines are active, effectively double-blocking the transmitter.
|
||||
|
||||
3. Deactivates the soft rfkill line if none of the hardware rfkill lines are
|
||||
active and the caller asked for RFKILL_STATE_UNBLOCKED.
|
||||
|
||||
===============================================================================
|
||||
4: Kernel API
|
||||
4. Kernel API
|
||||
|
||||
To build a driver with rfkill subsystem support, the driver should depend on
|
||||
(or select) the Kconfig symbol RFKILL; it should _not_ depend on RFKILL_INPUT.
|
||||
(or select) the Kconfig symbol RFKILL.
|
||||
|
||||
The hardware the driver talks to may be write-only (where the current state
|
||||
of the hardware is unknown), or read-write (where the hardware can be queried
|
||||
about its current state).
|
||||
|
||||
The rfkill class will call the get_state hook of a device every time it needs
|
||||
to know the *real* current state of the hardware. This can happen often, but
|
||||
it does not do any polling, so it is not enough on hardware that is subject
|
||||
to state changes outside of the rfkill subsystem.
|
||||
Calling rfkill_set_hw_state() when a state change happens is required from
|
||||
rfkill drivers that control devices that can be hard-blocked unless they also
|
||||
assign the poll_hw_block() callback (then the rfkill core will poll the
|
||||
device). Don't do this unless you cannot get the event in any other way.
|
||||
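For reference, a condensed sketch of the registration flow the new API expects follows. It mirrors the tosa-bt driver changes later in this commit; the my_* names are placeholders, and the rfkill_alloc()/rfkill_ops/set_block() signatures are the ones used in that diff.

	/* Sketch of registering a transmitter with the rfkill core. */
	#include <linux/device.h>
	#include <linux/errno.h>
	#include <linux/rfkill.h>

	static int my_set_block(void *data, bool blocked)
	{
		/* turn the transmitter off when blocked, back on otherwise */
		return 0;
	}

	static const struct rfkill_ops my_rfkill_ops = {
		.set_block = my_set_block,
	};

	static int my_rfkill_setup(struct device *dev, void *priv)
	{
		struct rfkill *rfk;
		int err;

		rfk = rfkill_alloc("my-wlan", dev, RFKILL_TYPE_WLAN,
				   &my_rfkill_ops, priv);
		if (!rfk)
			return -ENOMEM;

		err = rfkill_register(rfk);
		if (err) {
			rfkill_destroy(rfk);
			return err;
		}

		/* When a hardware rfkill line changes, report it with
		 * rfkill_set_hw_state(rfk, blocked) as described above. */
		return 0;
	}
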
|
||||
Therefore, calling rfkill_force_state() when a state change happens is
|
||||
mandatory when the device has a hardware rfkill line, or when something else
|
||||
like the firmware could cause its state to be changed without going through the
|
||||
rfkill class.
|
||||
|
||||
Some hardware provides events when its status changes. In these cases, it is
|
||||
best for the driver to not provide a get_state hook, and instead register the
|
||||
rfkill class *already* with the correct status, and keep it updated using
|
||||
rfkill_force_state() when it gets an event from the hardware.
|
||||
|
||||
rfkill_force_state() must be used on the device resume handlers to update the
|
||||
rfkill status, should there be any chance of the device status changing during
|
||||
the sleep.
|
||||
5. Userspace support
|
||||
|
||||
There is no provision for a statically-allocated rfkill struct. You must
|
||||
use rfkill_allocate() to allocate one.
|
||||
|
||||
You should:
|
||||
- rfkill_allocate()
|
||||
- modify rfkill fields (flags, name)
|
||||
- modify state to the current hardware state (THIS IS THE ONLY TIME
|
||||
YOU CAN ACCESS state DIRECTLY)
|
||||
- rfkill_register()
|
||||
|
||||
The only way to set a device to the RFKILL_STATE_HARD_BLOCKED state is through
|
||||
a suitable return of get_state() or through rfkill_force_state().
|
||||
|
||||
When a device is in the RFKILL_STATE_HARD_BLOCKED state, the only way to switch
|
||||
it to a different state is through a suitable return of get_state() or through
|
||||
rfkill_force_state().
|
||||
|
||||
If toggle_radio() is called to set a device to state RFKILL_STATE_SOFT_BLOCKED
|
||||
when that device is already at the RFKILL_STATE_HARD_BLOCKED state, it should
|
||||
not return an error. Instead, it should try to double-block the transmitter,
|
||||
so that its state will change from RFKILL_STATE_HARD_BLOCKED to
|
||||
RFKILL_STATE_SOFT_BLOCKED should the hardware blocking cease.
|
||||
|
||||
Please refer to the source for more documentation.
|
||||
|
||||
===============================================================================
|
||||
5: Userspace support
|
||||
|
||||
rfkill devices issue uevents (with an action of "change"), with the following
|
||||
environment variables set:
|
||||
|
||||
RFKILL_NAME
|
||||
RFKILL_STATE
|
||||
RFKILL_TYPE
|
||||
|
||||
The ABI for these variables is defined by the sysfs attributes. It is best
|
||||
to take a quick look at the source to make sure of the possible values.
|
||||
|
||||
It is expected that HAL will trap those, and bridge them to DBUS, etc. These
|
||||
events CAN and SHOULD be used to give feedback to the user about the rfkill
|
||||
status of the system.
|
||||
|
||||
Input devices may issue events that are related to rfkill. These are the
|
||||
various KEY_* events and SW_* events supported by rfkill-input.c.
|
||||
|
||||
Userspace may not change the state of an rfkill switch in response to an
|
||||
input event; it should refrain from changing states entirely.
|
||||
|
||||
Userspace cannot assume it is the only source of control for rfkill switches.
|
||||
Their state can change due to firmware actions, direct user actions, and the
|
||||
rfkill-input EPO override for *_RFKILL_ALL.
|
||||
|
||||
When rfkill-input is not active, userspace must initiate a rfkill status
|
||||
change by writing to the "state" attribute in order for anything to happen.
|
||||
|
||||
Take particular care to implement EV_SW SW_RFKILL_ALL properly. When that
|
||||
switch is set to OFF, *every* rfkill device *MUST* be immediately put into the
|
||||
RFKILL_STATE_SOFT_BLOCKED state, no questions asked.
|
||||
|
||||
The following sysfs entries will be created:
|
||||
The following sysfs entries exist for every rfkill device:
|
||||
|
||||
name: Name assigned by driver to this key (interface or driver name).
|
||||
type: Name of the key type ("wlan", "bluetooth", etc).
|
||||
state: Current state of the transmitter
|
||||
0: RFKILL_STATE_SOFT_BLOCKED
|
||||
transmitter is forced off, but one can override it
|
||||
by a write to the state attribute;
|
||||
transmitter is turned off by software
|
||||
1: RFKILL_STATE_UNBLOCKED
|
||||
transmitter is NOT forced off, and may operate if
|
||||
all other conditions for such operation are met
|
||||
(such as interface is up and configured, etc);
|
||||
transmitter is (potentially) active
|
||||
2: RFKILL_STATE_HARD_BLOCKED
|
||||
transmitter is forced off by something outside of
|
||||
the driver's control. One cannot set a device to
|
||||
this state through writes to the state attribute;
|
||||
claim: 1: Userspace handles events, 0: Kernel handles events
|
||||
the driver's control.
|
||||
claim: 0: Kernel handles events (currently always reads that value)
|
||||
|
||||
Both the "state" and "claim" entries are also writable. For the "state" entry
|
||||
this means that when 1 or 0 is written, the device rfkill state (if not yet in
|
||||
the requested state) will be toggled accordingly.
|
||||
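For illustration, a small userspace sketch exercising the "state" attribute described above; the /sys/class/rfkill/rfkill0 path is just an example, real device numbers vary.

	/* Sketch: query and change the sysfs "state" attribute documented above. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		char buf[4];
		int fd = open("/sys/class/rfkill/rfkill0/state", O_RDWR);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		if (read(fd, buf, sizeof(buf) - 1) > 0)
			printf("state: %c (0=soft blocked, 1=unblocked, 2=hard blocked)\n",
			       buf[0]);

		/* Writing "1" asks for unblocked, "0" for soft blocked; the
		 * hard-blocked state (2) cannot be set from here. */
		if (pwrite(fd, "1", 1, 0) != 1)
			perror("pwrite");

		close(fd);
		return 0;
	}
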
rfkill devices also issue uevents (with an action of "change"), with the
|
||||
following environment variables set:
|
||||
|
||||
For the "claim" entry writing 1 to it means that the kernel no longer handles
|
||||
key events even though RFKILL_INPUT input was enabled. When "claim" has been
|
||||
set to 0, userspace should make sure that it listens for the input events or
|
||||
check the sysfs "state" entry regularly to correctly perform the required tasks
|
||||
when the rfkill key is pressed.
|
||||
RFKILL_NAME
|
||||
RFKILL_STATE
|
||||
RFKILL_TYPE
|
||||
|
||||
A note about input devices and EV_SW events:
|
||||
The contents of these variables correspond to the "name", "state" and
|
||||
"type" sysfs files explained above.
|
||||
|
||||
In order to know the current state of an input device switch (like
|
||||
SW_RFKILL_ALL), you will need to use an IOCTL. That information is not
|
||||
available through sysfs in a generic way at this time, and it is not available
|
||||
through the rfkill class AT ALL.
|
||||
An alternative userspace interface exists as a misc device /dev/rfkill,
|
||||
which allows userspace to obtain and set the state of rfkill devices and
|
||||
sets of devices. It also notifies userspace about device addition and
|
||||
removal. The API is a simple read/write API that is defined in
|
||||
linux/rfkill.h.
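As an illustration of that read/write API, a hedged userspace sketch follows; the struct rfkill_event fields and the RFKILL_OP_*/RFKILL_TYPE_* names are taken from linux/rfkill.h and should be checked against the header actually installed.

	/* Sketch: soft-block all WLAN transmitters, then dump rfkill events. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <linux/rfkill.h>

	int main(void)
	{
		struct rfkill_event ev = {
			.op   = RFKILL_OP_CHANGE_ALL,
			.type = RFKILL_TYPE_WLAN,
			.soft = 1,			/* soft-block every WLAN device */
		};
		int fd = open("/dev/rfkill", O_RDWR);

		if (fd < 0) {
			perror("open /dev/rfkill");
			return 1;
		}
		if (write(fd, &ev, sizeof(ev)) < 0)
			perror("write");

		/* Each read() returns one event describing an added, removed or
		 * changed rfkill device. */
		while (read(fd, &ev, sizeof(ev)) == sizeof(ev))
			printf("idx=%u type=%d soft=%d hard=%d\n",
			       ev.idx, ev.type, ev.soft, ev.hard);

		close(fd);
		return 0;
	}
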
|
||||
|
|
25	MAINTAINERS
|
@ -1545,6 +1545,13 @@ W: http://www.fi.muni.cz/~kas/cosa/
|
|||
S: Maintained
|
||||
F: drivers/net/wan/cosa*
|
||||
|
||||
CPMAC ETHERNET DRIVER
|
||||
P: Florian Fainelli
|
||||
M: florian@openwrt.org
|
||||
L: netdev@vger.kernel.org
|
||||
S: Maintained
|
||||
F: drivers/net/cpmac.c
|
||||
|
||||
CPU FREQUENCY DRIVERS
|
||||
P: Dave Jones
|
||||
M: davej@redhat.com
|
||||
|
@ -2812,6 +2819,18 @@ L: linux1394-devel@lists.sourceforge.net
|
|||
S: Maintained
|
||||
F: drivers/ieee1394/raw1394*
|
||||
|
||||
IEEE 802.15.4 SUBSYSTEM
|
||||
P: Dmitry Eremin-Solenikov
|
||||
M: dbaryshkov@gmail.com
|
||||
P: Sergey Lapin
|
||||
M: slapin@ossfans.org
|
||||
L: linux-zigbee-devel@lists.sourceforge.net
|
||||
W: http://apps.sourceforge.net/trac/linux-zigbee
|
||||
T: git git://git.kernel.org/pub/scm/linux/kernel/git/lumag/lowpan.git
|
||||
S: Maintained
|
||||
F: net/ieee802154/
|
||||
F: drivers/ieee802154/
|
||||
|
||||
INTEGRITY MEASUREMENT ARCHITECTURE (IMA)
|
||||
P: Mimi Zohar
|
||||
M: zohar@us.ibm.com
|
||||
|
@ -4746,9 +4765,9 @@ S: Supported
|
|||
F: fs/reiserfs/
|
||||
|
||||
RFKILL
|
||||
P: Ivo van Doorn
|
||||
M: IvDoorn@gmail.com
|
||||
L: netdev@vger.kernel.org
|
||||
P: Johannes Berg
|
||||
M: johannes@sipsolutions.net
|
||||
L: linux-wireless@vger.kernel.org
|
||||
S: Maintained
|
||||
F: Documentation/rfkill.txt
|
||||
F: net/rfkill/
|
||||
|
|
|
@ -120,4 +120,6 @@
|
|||
#define EOWNERDEAD 136 /* Owner died */
|
||||
#define ENOTRECOVERABLE 137 /* State not recoverable */
|
||||
|
||||
#define ERFKILL 138 /* Operation not possible due to RF-kill */
|
||||
|
||||
#endif
|
||||
|
|
|
@ -35,21 +35,25 @@ static void tosa_bt_off(struct tosa_bt_data *data)
|
|||
gpio_set_value(data->gpio_reset, 0);
|
||||
}
|
||||
|
||||
static int tosa_bt_toggle_radio(void *data, enum rfkill_state state)
|
||||
static int tosa_bt_set_block(void *data, bool blocked)
|
||||
{
|
||||
pr_info("BT_RADIO going: %s\n",
|
||||
state == RFKILL_STATE_UNBLOCKED ? "on" : "off");
|
||||
pr_info("BT_RADIO going: %s\n", blocked ? "off" : "on");
|
||||
|
||||
if (state == RFKILL_STATE_UNBLOCKED) {
|
||||
if (!blocked) {
|
||||
pr_info("TOSA_BT: going ON\n");
|
||||
tosa_bt_on(data);
|
||||
} else {
|
||||
pr_info("TOSA_BT: going OFF\n");
|
||||
tosa_bt_off(data);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct rfkill_ops tosa_bt_rfkill_ops = {
|
||||
.set_block = tosa_bt_set_block,
|
||||
};
|
||||
|
||||
static int tosa_bt_probe(struct platform_device *dev)
|
||||
{
|
||||
int rc;
|
||||
|
@ -70,18 +74,14 @@ static int tosa_bt_probe(struct platform_device *dev)
|
|||
if (rc)
|
||||
goto err_pwr_dir;
|
||||
|
||||
rfk = rfkill_allocate(&dev->dev, RFKILL_TYPE_BLUETOOTH);
|
||||
rfk = rfkill_alloc("tosa-bt", &dev->dev, RFKILL_TYPE_BLUETOOTH,
|
||||
&tosa_bt_rfkill_ops, data);
|
||||
if (!rfk) {
|
||||
rc = -ENOMEM;
|
||||
goto err_rfk_alloc;
|
||||
}
|
||||
|
||||
rfk->name = "tosa-bt";
|
||||
rfk->toggle_radio = tosa_bt_toggle_radio;
|
||||
rfk->data = data;
|
||||
#ifdef CONFIG_RFKILL_LEDS
|
||||
rfk->led_trigger.name = "tosa-bt";
|
||||
#endif
|
||||
rfkill_set_led_trigger_name(rfk, "tosa-bt");
|
||||
|
||||
rc = rfkill_register(rfk);
|
||||
if (rc)
|
||||
|
@ -92,9 +92,7 @@ static int tosa_bt_probe(struct platform_device *dev)
|
|||
return 0;
|
||||
|
||||
err_rfkill:
|
||||
if (rfk)
|
||||
rfkill_free(rfk);
|
||||
rfk = NULL;
|
||||
rfkill_destroy(rfk);
|
||||
err_rfk_alloc:
|
||||
tosa_bt_off(data);
|
||||
err_pwr_dir:
|
||||
|
@ -113,8 +111,10 @@ static int __devexit tosa_bt_remove(struct platform_device *dev)
|
|||
|
||||
platform_set_drvdata(dev, NULL);
|
||||
|
||||
if (rfk)
|
||||
if (rfk) {
|
||||
rfkill_unregister(rfk);
|
||||
rfkill_destroy(rfk);
|
||||
}
|
||||
rfk = NULL;
|
||||
|
||||
tosa_bt_off(data);
|
||||
|
|
|
@ -31,7 +31,6 @@
|
|||
#include <linux/input.h>
|
||||
#include <linux/gpio.h>
|
||||
#include <linux/pda_power.h>
|
||||
#include <linux/rfkill.h>
|
||||
#include <linux/spi/spi.h>
|
||||
|
||||
#include <asm/setup.h>
|
||||
|
|
|
@ -119,6 +119,8 @@
|
|||
#define EOWNERDEAD 165 /* Owner died */
|
||||
#define ENOTRECOVERABLE 166 /* State not recoverable */
|
||||
|
||||
#define ERFKILL 167 /* Operation not possible due to RF-kill */
|
||||
|
||||
#define EDQUOT 1133 /* Quota exceeded */
|
||||
|
||||
#ifdef __KERNEL__
|
||||
|
|
|
@ -120,5 +120,6 @@
|
|||
#define EOWNERDEAD 254 /* Owner died */
|
||||
#define ENOTRECOVERABLE 255 /* State not recoverable */
|
||||
|
||||
#define ERFKILL 256 /* Operation not possible due to RF-kill */
|
||||
|
||||
#endif
|
||||
|
|
|
@ -668,6 +668,8 @@ struct ucc_slow_pram {
|
|||
#define UCC_GETH_UPSMR_RMM 0x00001000
|
||||
#define UCC_GETH_UPSMR_CAM 0x00000400
|
||||
#define UCC_GETH_UPSMR_BRO 0x00000200
|
||||
#define UCC_GETH_UPSMR_SMM 0x00000080
|
||||
#define UCC_GETH_UPSMR_SGMM 0x00000020
|
||||
|
||||
/* UCC Transmit On Demand Register (UTODR) */
|
||||
#define UCC_SLOW_TOD 0x8000
|
||||
|
|
|
@ -110,4 +110,6 @@
|
|||
#define EOWNERDEAD 132 /* Owner died */
|
||||
#define ENOTRECOVERABLE 133 /* State not recoverable */
|
||||
|
||||
#define ERFKILL 134 /* Operation not possible due to RF-kill */
|
||||
|
||||
#endif
|
||||
|
|
|
@ -107,3 +107,4 @@ obj-$(CONFIG_SSB) += ssb/
|
|||
obj-$(CONFIG_VIRTIO) += virtio/
|
||||
obj-$(CONFIG_STAGING) += staging/
|
||||
obj-y += platform/
|
||||
obj-y += ieee802154/
|
||||
|
|
|
@ -34,13 +34,6 @@ new_skb(ulong len)
|
|||
skb_reset_mac_header(skb);
|
||||
skb_reset_network_header(skb);
|
||||
skb->protocol = __constant_htons(ETH_P_AOE);
|
||||
skb->priority = 0;
|
||||
skb->next = skb->prev = NULL;
|
||||
|
||||
/* tell the network layer not to perform IP checksums
|
||||
* or to get the NIC to do it
|
||||
*/
|
||||
skb->ip_summed = CHECKSUM_NONE;
|
||||
}
|
||||
return skb;
|
||||
}
|
||||
|
|
|
@@ -40,7 +40,7 @@
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#define VERSION "1.2"
#define VERSION "1.3"

static int minor = MISC_DYNAMIC_MINOR;

@@ -51,14 +51,8 @@ struct vhci_data {

wait_queue_head_t read_wait;
struct sk_buff_head readq;

struct fasync_struct *fasync;
};

#define VHCI_FASYNC 0x0010

static struct miscdevice vhci_miscdev;

static int vhci_open_dev(struct hci_dev *hdev)
{
set_bit(HCI_RUNNING, &hdev->flags);

@@ -105,9 +99,6 @@ static int vhci_send_frame(struct sk_buff *skb)
memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
skb_queue_tail(&data->readq, skb);

if (data->flags & VHCI_FASYNC)
kill_fasync(&data->fasync, SIGIO, POLL_IN);

wake_up_interruptible(&data->read_wait);

return 0;

@@ -179,41 +170,31 @@ static inline ssize_t vhci_put_user(struct vhci_data *data,
static ssize_t vhci_read(struct file *file,
char __user *buf, size_t count, loff_t *pos)
{
DECLARE_WAITQUEUE(wait, current);
struct vhci_data *data = file->private_data;
struct sk_buff *skb;
ssize_t ret = 0;

add_wait_queue(&data->read_wait, &wait);
while (count) {
set_current_state(TASK_INTERRUPTIBLE);

skb = skb_dequeue(&data->readq);
if (!skb) {
if (file->f_flags & O_NONBLOCK) {
ret = -EAGAIN;
break;
}

if (signal_pending(current)) {
ret = -ERESTARTSYS;
break;
}

schedule();
continue;
if (skb) {
ret = vhci_put_user(data, skb, buf, count);
if (ret < 0)
skb_queue_head(&data->readq, skb);
else
kfree_skb(skb);
break;
}

if (access_ok(VERIFY_WRITE, buf, count))
ret = vhci_put_user(data, skb, buf, count);
else
ret = -EFAULT;
if (file->f_flags & O_NONBLOCK) {
ret = -EAGAIN;
break;
}

kfree_skb(skb);
break;
ret = wait_event_interruptible(data->read_wait,
!skb_queue_empty(&data->readq));
if (ret < 0)
break;
}
set_current_state(TASK_RUNNING);
remove_wait_queue(&data->read_wait, &wait);

return ret;
}
@@ -223,9 +204,6 @@ static ssize_t vhci_write(struct file *file,
{
struct vhci_data *data = file->private_data;

if (!access_ok(VERIFY_READ, buf, count))
return -EFAULT;

return vhci_get_user(data, buf, count);
}

@@ -259,11 +237,9 @@ static int vhci_open(struct inode *inode, struct file *file)
skb_queue_head_init(&data->readq);
init_waitqueue_head(&data->read_wait);

lock_kernel();
hdev = hci_alloc_dev();
if (!hdev) {
kfree(data);
unlock_kernel();
return -ENOMEM;
}

@@ -284,12 +260,10 @@ static int vhci_open(struct inode *inode, struct file *file)
BT_ERR("Can't register HCI device");
kfree(data);
hci_free_dev(hdev);
unlock_kernel();
return -EBUSY;
}

file->private_data = data;
unlock_kernel();

return nonseekable_open(inode, file);
}

@@ -310,48 +284,25 @@ static int vhci_release(struct inode *inode, struct file *file)
return 0;
}

static int vhci_fasync(int fd, struct file *file, int on)
{
struct vhci_data *data = file->private_data;
int err = 0;

lock_kernel();
err = fasync_helper(fd, file, on, &data->fasync);
if (err < 0)
goto out;

if (on)
data->flags |= VHCI_FASYNC;
else
data->flags &= ~VHCI_FASYNC;

out:
unlock_kernel();
return err;
}

static const struct file_operations vhci_fops = {
.owner = THIS_MODULE,
.read = vhci_read,
.write = vhci_write,
.poll = vhci_poll,
.ioctl = vhci_ioctl,
.open = vhci_open,
.release = vhci_release,
.fasync = vhci_fasync,
};

static struct miscdevice vhci_miscdev= {
.name = "vhci",
.fops = &vhci_fops,
.name = "vhci",
.fops = &vhci_fops,
.minor = MISC_DYNAMIC_MINOR,
};

static int __init vhci_init(void)
{
BT_INFO("Virtual HCI driver ver %s", VERSION);

vhci_miscdev.minor = minor;

if (misc_register(&vhci_miscdev) < 0) {
BT_ERR("Can't register misc device with minor %d", minor);
return -EIO;
@@ -369,9 +320,6 @@ static void __exit vhci_exit(void)
module_init(vhci_init);
module_exit(vhci_exit);

module_param(minor, int, 0444);
MODULE_PARM_DESC(minor, "Miscellaneous minor device number");

MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth virtual HCI driver ver " VERSION);
MODULE_VERSION(VERSION);
drivers/ieee802154/Kconfig (new file, 22 lines)
@@ -0,0 +1,22 @@
menuconfig IEEE802154_DRIVERS
bool "IEEE 802.15.4 drivers"
depends on NETDEVICES && IEEE802154
default y
---help---
Say Y here to get to see options for IEEE 802.15.4 Low-Rate
Wireless Personal Area Network device drivers. This option alone
does not add any kernel code.

If you say N, all options in this submenu will be skipped and
disabled.

config IEEE802154_FAKEHARD
tristate "Fake LR-WPAN driver with several interconnected devices"
depends on IEEE802154_DRIVERS
---help---
Say Y here to enable the fake driver that serves as an example
of HardMAC device driver.

This driver can also be built as a module. To do so say M here.
The module will be called 'fakehard'.
drivers/ieee802154/Makefile (new file, 3 lines)
@@ -0,0 +1,3 @@
obj-$(CONFIG_IEEE802154_FAKEHARD) += fakehard.o

EXTRA_CFLAGS += -DDEBUG -DCONFIG_FFD
drivers/ieee802154/fakehard.c (new file, 270 lines)
@@ -0,0 +1,270 @@
|
|||
/*
|
||||
* Sample driver for HardMAC IEEE 802.15.4 devices
|
||||
*
|
||||
* Copyright (C) 2009 Siemens AG
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2
|
||||
* as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License along
|
||||
* with this program; if not, write to the Free Software Foundation, Inc.,
|
||||
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Written by:
|
||||
* Dmitry Eremin-Solenikov <dmitry.baryshkov@siemens.com>
|
||||
*/
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/netdevice.h>
|
||||
#include <linux/skbuff.h>
|
||||
#include <linux/if_arp.h>
|
||||
|
||||
#include <net/ieee802154/af_ieee802154.h>
|
||||
#include <net/ieee802154/netdevice.h>
|
||||
#include <net/ieee802154/mac_def.h>
|
||||
#include <net/ieee802154/nl802154.h>
|
||||
|
||||
static u16 fake_get_pan_id(struct net_device *dev)
|
||||
{
|
||||
BUG_ON(dev->type != ARPHRD_IEEE802154);
|
||||
|
||||
return 0xeba1;
|
||||
}
|
||||
|
||||
static u16 fake_get_short_addr(struct net_device *dev)
|
||||
{
|
||||
BUG_ON(dev->type != ARPHRD_IEEE802154);
|
||||
|
||||
return 0x1;
|
||||
}
|
||||
|
||||
static u8 fake_get_dsn(struct net_device *dev)
|
||||
{
|
||||
BUG_ON(dev->type != ARPHRD_IEEE802154);
|
||||
|
||||
return 0x00; /* DSN are implemented in HW, so return just 0 */
|
||||
}
|
||||
|
||||
static u8 fake_get_bsn(struct net_device *dev)
|
||||
{
|
||||
BUG_ON(dev->type != ARPHRD_IEEE802154);
|
||||
|
||||
return 0x00; /* BSN are implemented in HW, so return just 0 */
|
||||
}
|
||||
|
||||
static int fake_assoc_req(struct net_device *dev,
|
||||
struct ieee802154_addr *addr, u8 channel, u8 cap)
|
||||
{
|
||||
/* We simply emulate it here */
|
||||
return ieee802154_nl_assoc_confirm(dev, fake_get_short_addr(dev),
|
||||
IEEE802154_SUCCESS);
|
||||
}
|
||||
|
||||
static int fake_assoc_resp(struct net_device *dev,
|
||||
struct ieee802154_addr *addr, u16 short_addr, u8 status)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int fake_disassoc_req(struct net_device *dev,
|
||||
struct ieee802154_addr *addr, u8 reason)
|
||||
{
|
||||
return ieee802154_nl_disassoc_confirm(dev, IEEE802154_SUCCESS);
|
||||
}
|
||||
|
||||
static int fake_start_req(struct net_device *dev, struct ieee802154_addr *addr,
|
||||
u8 channel,
|
||||
u8 bcn_ord, u8 sf_ord, u8 pan_coord, u8 blx,
|
||||
u8 coord_realign)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int fake_scan_req(struct net_device *dev, u8 type, u32 channels,
|
||||
u8 duration)
|
||||
{
|
||||
u8 edl[27] = {};
|
||||
return ieee802154_nl_scan_confirm(dev, IEEE802154_SUCCESS, type,
|
||||
channels,
|
||||
type == IEEE802154_MAC_SCAN_ED ? edl : NULL);
|
||||
}
|
||||
|
||||
static struct ieee802154_mlme_ops fake_mlme = {
|
||||
.assoc_req = fake_assoc_req,
|
||||
.assoc_resp = fake_assoc_resp,
|
||||
.disassoc_req = fake_disassoc_req,
|
||||
.start_req = fake_start_req,
|
||||
.scan_req = fake_scan_req,
|
||||
|
||||
.get_pan_id = fake_get_pan_id,
|
||||
.get_short_addr = fake_get_short_addr,
|
||||
.get_dsn = fake_get_dsn,
|
||||
.get_bsn = fake_get_bsn,
|
||||
};
|
||||
|
||||
static int ieee802154_fake_open(struct net_device *dev)
|
||||
{
|
||||
netif_start_queue(dev);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ieee802154_fake_close(struct net_device *dev)
|
||||
{
|
||||
netif_stop_queue(dev);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ieee802154_fake_xmit(struct sk_buff *skb, struct net_device *dev)
|
||||
{
|
||||
skb->iif = dev->ifindex;
|
||||
skb->dev = dev;
|
||||
dev->stats.tx_packets++;
|
||||
dev->stats.tx_bytes += skb->len;
|
||||
|
||||
dev->trans_start = jiffies;
|
||||
|
||||
/* FIXME: do hardware work here ... */
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
static int ieee802154_fake_ioctl(struct net_device *dev, struct ifreq *ifr,
|
||||
int cmd)
|
||||
{
|
||||
struct sockaddr_ieee802154 *sa =
|
||||
(struct sockaddr_ieee802154 *)&ifr->ifr_addr;
|
||||
u16 pan_id, short_addr;
|
||||
|
||||
switch (cmd) {
|
||||
case SIOCGIFADDR:
|
||||
/* FIXME: fixed here, get from device IRL */
|
||||
pan_id = fake_get_pan_id(dev);
|
||||
short_addr = fake_get_short_addr(dev);
|
||||
if (pan_id == IEEE802154_PANID_BROADCAST ||
|
||||
short_addr == IEEE802154_ADDR_BROADCAST)
|
||||
return -EADDRNOTAVAIL;
|
||||
|
||||
sa->family = AF_IEEE802154;
|
||||
sa->addr.addr_type = IEEE802154_ADDR_SHORT;
|
||||
sa->addr.pan_id = pan_id;
|
||||
sa->addr.short_addr = short_addr;
|
||||
return 0;
|
||||
}
|
||||
return -ENOIOCTLCMD;
|
||||
}
|
||||
|
||||
static int ieee802154_fake_mac_addr(struct net_device *dev, void *p)
|
||||
{
|
||||
return -EBUSY; /* HW address is built into the device */
|
||||
}
|
||||
|
||||
static const struct net_device_ops fake_ops = {
|
||||
.ndo_open = ieee802154_fake_open,
|
||||
.ndo_stop = ieee802154_fake_close,
|
||||
.ndo_start_xmit = ieee802154_fake_xmit,
|
||||
.ndo_do_ioctl = ieee802154_fake_ioctl,
|
||||
.ndo_set_mac_address = ieee802154_fake_mac_addr,
|
||||
};
|
||||
|
||||
|
||||
static void ieee802154_fake_setup(struct net_device *dev)
|
||||
{
|
||||
dev->addr_len = IEEE802154_ADDR_LEN;
|
||||
memset(dev->broadcast, 0xff, IEEE802154_ADDR_LEN);
|
||||
dev->features = NETIF_F_NO_CSUM;
|
||||
dev->needed_tailroom = 2; /* FCS */
|
||||
dev->mtu = 127;
|
||||
dev->tx_queue_len = 10;
|
||||
dev->type = ARPHRD_IEEE802154;
|
||||
dev->flags = IFF_NOARP | IFF_BROADCAST;
|
||||
dev->watchdog_timeo = 0;
|
||||
}
|
||||
|
||||
|
||||
static int __devinit ieee802154fake_probe(struct platform_device *pdev)
|
||||
{
|
||||
struct net_device *dev =
|
||||
alloc_netdev(0, "hardwpan%d", ieee802154_fake_setup);
|
||||
int err;
|
||||
|
||||
if (!dev)
|
||||
return -ENOMEM;
|
||||
|
||||
memcpy(dev->dev_addr, "\xba\xbe\xca\xfe\xde\xad\xbe\xef",
|
||||
dev->addr_len);
|
||||
memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
|
||||
|
||||
dev->netdev_ops = &fake_ops;
|
||||
dev->ml_priv = &fake_mlme;
|
||||
|
||||
/*
|
||||
* If the name is a format string the caller wants us to do a
|
||||
* name allocation.
|
||||
*/
|
||||
if (strchr(dev->name, '%')) {
|
||||
err = dev_alloc_name(dev, dev->name);
|
||||
if (err < 0)
|
||||
goto out;
|
||||
}
|
||||
|
||||
SET_NETDEV_DEV(dev, &pdev->dev);
|
||||
|
||||
platform_set_drvdata(pdev, dev);
|
||||
|
||||
err = register_netdev(dev);
|
||||
if (err < 0)
|
||||
goto out;
|
||||
|
||||
|
||||
dev_info(&pdev->dev, "Added ieee802154 HardMAC hardware\n");
|
||||
return 0;
|
||||
|
||||
out:
|
||||
unregister_netdev(dev);
|
||||
return err;
|
||||
}
|
||||
|
||||
static int __devexit ieee802154fake_remove(struct platform_device *pdev)
|
||||
{
|
||||
struct net_device *dev = platform_get_drvdata(pdev);
|
||||
unregister_netdev(dev);
|
||||
free_netdev(dev);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct platform_device *ieee802154fake_dev;
|
||||
|
||||
static struct platform_driver ieee802154fake_driver = {
|
||||
.probe = ieee802154fake_probe,
|
||||
.remove = __devexit_p(ieee802154fake_remove),
|
||||
.driver = {
|
||||
.name = "ieee802154hardmac",
|
||||
.owner = THIS_MODULE,
|
||||
},
|
||||
};
|
||||
|
||||
static __init int fake_init(void)
|
||||
{
|
||||
ieee802154fake_dev = platform_device_register_simple(
|
||||
"ieee802154hardmac", -1, NULL, 0);
|
||||
return platform_driver_register(&ieee802154fake_driver);
|
||||
}
|
||||
|
||||
static __exit void fake_exit(void)
|
||||
{
|
||||
platform_driver_unregister(&ieee802154fake_driver);
|
||||
platform_device_unregister(ieee802154fake_dev);
|
||||
}
|
||||
|
||||
module_init(fake_init);
|
||||
module_exit(fake_exit);
|
||||
MODULE_LICENSE("GPL");
|
||||
|
|
@@ -1394,8 +1394,8 @@ void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb,
|
|||
struct ipoib_dev_priv *priv = netdev_priv(dev);
|
||||
int e = skb_queue_empty(&priv->cm.skb_queue);
|
||||
|
||||
if (skb->dst)
|
||||
skb->dst->ops->update_pmtu(skb->dst, mtu);
|
||||
if (skb_dst(skb))
|
||||
skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);
|
||||
|
||||
skb_queue_tail(&priv->cm.skb_queue, skb);
|
||||
if (e)
|
||||
|
|
|
@ -561,7 +561,7 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
|
|||
struct ipoib_neigh *neigh;
|
||||
unsigned long flags;
|
||||
|
||||
neigh = ipoib_neigh_alloc(skb->dst->neighbour, skb->dev);
|
||||
neigh = ipoib_neigh_alloc(skb_dst(skb)->neighbour, skb->dev);
|
||||
if (!neigh) {
|
||||
++dev->stats.tx_dropped;
|
||||
dev_kfree_skb_any(skb);
|
||||
|
@ -570,9 +570,9 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
|
|||
|
||||
spin_lock_irqsave(&priv->lock, flags);
|
||||
|
||||
path = __path_find(dev, skb->dst->neighbour->ha + 4);
|
||||
path = __path_find(dev, skb_dst(skb)->neighbour->ha + 4);
|
||||
if (!path) {
|
||||
path = path_rec_create(dev, skb->dst->neighbour->ha + 4);
|
||||
path = path_rec_create(dev, skb_dst(skb)->neighbour->ha + 4);
|
||||
if (!path)
|
||||
goto err_path;
|
||||
|
||||
|
@ -605,7 +605,7 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
|
|||
goto err_drop;
|
||||
}
|
||||
} else
|
||||
ipoib_send(dev, skb, path->ah, IPOIB_QPN(skb->dst->neighbour->ha));
|
||||
ipoib_send(dev, skb, path->ah, IPOIB_QPN(skb_dst(skb)->neighbour->ha));
|
||||
} else {
|
||||
neigh->ah = NULL;
|
||||
|
||||
|
@ -635,15 +635,15 @@ static void ipoib_path_lookup(struct sk_buff *skb, struct net_device *dev)
|
|||
struct ipoib_dev_priv *priv = netdev_priv(skb->dev);
|
||||
|
||||
/* Look up path record for unicasts */
|
||||
if (skb->dst->neighbour->ha[4] != 0xff) {
|
||||
if (skb_dst(skb)->neighbour->ha[4] != 0xff) {
|
||||
neigh_add_path(skb, dev);
|
||||
return;
|
||||
}
|
||||
|
||||
/* Add in the P_Key for multicasts */
|
||||
skb->dst->neighbour->ha[8] = (priv->pkey >> 8) & 0xff;
|
||||
skb->dst->neighbour->ha[9] = priv->pkey & 0xff;
|
||||
ipoib_mcast_send(dev, skb->dst->neighbour->ha + 4, skb);
|
||||
skb_dst(skb)->neighbour->ha[8] = (priv->pkey >> 8) & 0xff;
|
||||
skb_dst(skb)->neighbour->ha[9] = priv->pkey & 0xff;
|
||||
ipoib_mcast_send(dev, skb_dst(skb)->neighbour->ha + 4, skb);
|
||||
}
|
||||
|
||||
static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
|
||||
|
@ -708,16 +708,16 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
|||
struct ipoib_neigh *neigh;
|
||||
unsigned long flags;
|
||||
|
||||
if (likely(skb->dst && skb->dst->neighbour)) {
|
||||
if (unlikely(!*to_ipoib_neigh(skb->dst->neighbour))) {
|
||||
if (likely(skb_dst(skb) && skb_dst(skb)->neighbour)) {
|
||||
if (unlikely(!*to_ipoib_neigh(skb_dst(skb)->neighbour))) {
|
||||
ipoib_path_lookup(skb, dev);
|
||||
return NETDEV_TX_OK;
|
||||
}
|
||||
|
||||
neigh = *to_ipoib_neigh(skb->dst->neighbour);
|
||||
neigh = *to_ipoib_neigh(skb_dst(skb)->neighbour);
|
||||
|
||||
if (unlikely((memcmp(&neigh->dgid.raw,
|
||||
skb->dst->neighbour->ha + 4,
|
||||
skb_dst(skb)->neighbour->ha + 4,
|
||||
sizeof(union ib_gid))) ||
|
||||
(neigh->dev != dev))) {
|
||||
spin_lock_irqsave(&priv->lock, flags);
|
||||
|
@ -743,7 +743,7 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
|||
return NETDEV_TX_OK;
|
||||
}
|
||||
} else if (neigh->ah) {
|
||||
ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(skb->dst->neighbour->ha));
|
||||
ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(skb_dst(skb)->neighbour->ha));
|
||||
return NETDEV_TX_OK;
|
||||
}
|
||||
|
||||
|
@ -772,7 +772,7 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
|||
if ((be16_to_cpup((__be16 *) skb->data) != ETH_P_ARP) &&
|
||||
(be16_to_cpup((__be16 *) skb->data) != ETH_P_RARP)) {
|
||||
ipoib_warn(priv, "Unicast, no %s: type %04x, QPN %06x %pI6\n",
|
||||
skb->dst ? "neigh" : "dst",
|
||||
skb_dst(skb) ? "neigh" : "dst",
|
||||
be16_to_cpup((__be16 *) skb->data),
|
||||
IPOIB_QPN(phdr->hwaddr),
|
||||
phdr->hwaddr + 4);
|
||||
|
@ -817,7 +817,7 @@ static int ipoib_hard_header(struct sk_buff *skb,
|
|||
* destination address onto the front of the skb so we can
|
||||
* figure out where to send the packet later.
|
||||
*/
|
||||
if ((!skb->dst || !skb->dst->neighbour) && daddr) {
|
||||
if ((!skb_dst(skb) || !skb_dst(skb)->neighbour) && daddr) {
|
||||
struct ipoib_pseudoheader *phdr =
|
||||
(struct ipoib_pseudoheader *) skb_push(skb, sizeof *phdr);
|
||||
memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN);
|
||||
|
@ -1053,6 +1053,7 @@ static void ipoib_setup(struct net_device *dev)
|
|||
dev->tx_queue_len = ipoib_sendq_size * 2;
|
||||
dev->features = (NETIF_F_VLAN_CHALLENGED |
|
||||
NETIF_F_HIGHDMA);
|
||||
dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
|
||||
|
||||
memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN);
|
||||
|
||||
|
|
|
@ -261,7 +261,7 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
|
|||
|
||||
skb->dev = dev;
|
||||
|
||||
if (!skb->dst || !skb->dst->neighbour) {
|
||||
if (!skb_dst(skb) || !skb_dst(skb)->neighbour) {
|
||||
/* put pseudoheader back on for next time */
|
||||
skb_push(skb, sizeof (struct ipoib_pseudoheader));
|
||||
}
|
||||
|
@ -707,10 +707,10 @@ void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb)
|
|||
|
||||
out:
|
||||
if (mcast && mcast->ah) {
|
||||
if (skb->dst &&
|
||||
skb->dst->neighbour &&
|
||||
!*to_ipoib_neigh(skb->dst->neighbour)) {
|
||||
struct ipoib_neigh *neigh = ipoib_neigh_alloc(skb->dst->neighbour,
|
||||
if (skb_dst(skb) &&
|
||||
skb_dst(skb)->neighbour &&
|
||||
!*to_ipoib_neigh(skb_dst(skb)->neighbour)) {
|
||||
struct ipoib_neigh *neigh = ipoib_neigh_alloc(skb_dst(skb)->neighbour,
|
||||
skb->dev);
|
||||
|
||||
if (neigh) {
|
||||
|
|
|
@ -490,7 +490,14 @@ static void pars_2_message(_cmsg * cmsg)
|
|||
}
|
||||
}
|
||||
|
||||
/*-------------------------------------------------------*/
|
||||
/**
|
||||
* capi_cmsg2message() - assemble CAPI 2.0 message from _cmsg structure
|
||||
* @cmsg: _cmsg structure
|
||||
* @msg: buffer for assembled message
|
||||
*
|
||||
* Return value: 0 for success
|
||||
*/
|
||||
|
||||
unsigned capi_cmsg2message(_cmsg * cmsg, u8 * msg)
|
||||
{
|
||||
cmsg->m = msg;
|
||||
|
@ -553,7 +560,14 @@ static void message_2_pars(_cmsg * cmsg)
|
|||
}
|
||||
}
|
||||
|
||||
/*-------------------------------------------------------*/
|
||||
/**
|
||||
* capi_message2cmsg() - disassemble CAPI 2.0 message into _cmsg structure
|
||||
* @cmsg: _cmsg structure
|
||||
* @msg: buffer for assembled message
|
||||
*
|
||||
* Return value: 0 for success
|
||||
*/
|
||||
|
||||
unsigned capi_message2cmsg(_cmsg * cmsg, u8 * msg)
|
||||
{
|
||||
memset(cmsg, 0, sizeof(_cmsg));
|
||||
|
@ -573,7 +587,18 @@ unsigned capi_message2cmsg(_cmsg * cmsg, u8 * msg)
|
|||
return 0;
|
||||
}
|
||||
|
||||
/*-------------------------------------------------------*/
|
||||
/**
|
||||
* capi_cmsg_header() - initialize header part of _cmsg structure
|
||||
* @cmsg: _cmsg structure
|
||||
* @_ApplId: ApplID field value
|
||||
* @_Command: Command field value
|
||||
* @_Subcommand: Subcommand field value
|
||||
* @_Messagenumber: Message Number field value
|
||||
* @_Controller: Controller/PLCI/NCCI field value
|
||||
*
|
||||
* Return value: 0 for success
|
||||
*/
|
||||
|
||||
unsigned capi_cmsg_header(_cmsg * cmsg, u16 _ApplId,
|
||||
u8 _Command, u8 _Subcommand,
|
||||
u16 _Messagenumber, u32 _Controller)
|
||||
|
@ -641,6 +666,14 @@ static char *mnames[] =
|
|||
[0x4e] = "MANUFACTURER_RESP"
|
||||
};
|
||||
|
||||
/**
|
||||
* capi_cmd2str() - convert CAPI 2.0 command/subcommand number to name
|
||||
* @cmd: command number
|
||||
* @subcmd: subcommand number
|
||||
*
|
||||
* Return value: static string, NULL if command/subcommand unknown
|
||||
*/
|
||||
|
||||
char *capi_cmd2str(u8 cmd, u8 subcmd)
|
||||
{
|
||||
return mnames[command_2_index(cmd, subcmd)];
|
||||
|
@ -879,6 +912,11 @@ init:
|
|||
return cdb;
|
||||
}
|
||||
|
||||
/**
|
||||
* cdebbuf_free() - free CAPI debug buffer
|
||||
* @cdb: buffer to free
|
||||
*/
|
||||
|
||||
void cdebbuf_free(_cdebbuf *cdb)
|
||||
{
|
||||
if (likely(cdb == g_debbuf)) {
|
||||
|
@ -891,6 +929,16 @@ void cdebbuf_free(_cdebbuf *cdb)
|
|||
}
|
||||
|
||||
|
||||
/**
|
||||
* capi_message2str() - format CAPI 2.0 message for printing
|
||||
* @msg: CAPI 2.0 message
|
||||
*
|
||||
* Allocates a CAPI debug buffer and fills it with a printable representation
|
||||
* of the CAPI 2.0 message in @msg.
|
||||
* Return value: allocated debug buffer, NULL on error
|
||||
* The returned buffer should be freed by a call to cdebbuf_free() after use.
|
||||
*/
|
||||
|
||||
_cdebbuf *capi_message2str(u8 * msg)
|
||||
{
|
||||
_cdebbuf *cdb;
|
||||
|
@ -926,10 +974,23 @@ _cdebbuf *capi_message2str(u8 * msg)
|
|||
return cdb;
|
||||
}
|
||||
|
||||
/**
|
||||
* capi_cmsg2str() - format _cmsg structure for printing
|
||||
* @cmsg: _cmsg structure
|
||||
*
|
||||
* Allocates a CAPI debug buffer and fills it with a printable representation
|
||||
* of the CAPI 2.0 message stored in @cmsg by a previous call to
|
||||
* capi_cmsg2message() or capi_message2cmsg().
|
||||
* Return value: allocated debug buffer, NULL on error
|
||||
* The returned buffer should be freed by a call to cdebbuf_free() after use.
|
||||
*/
|
||||
|
||||
_cdebbuf *capi_cmsg2str(_cmsg * cmsg)
|
||||
{
|
||||
_cdebbuf *cdb;
|
||||
|
||||
if (!cmsg->m)
|
||||
return NULL; /* no message */
|
||||
cdb = cdebbuf_alloc();
|
||||
if (!cdb)
|
||||
return NULL;
|
||||
|
|
|
@ -377,14 +377,14 @@ void capi_ctr_ready(struct capi_ctr * card)
|
|||
EXPORT_SYMBOL(capi_ctr_ready);
|
||||
|
||||
/**
|
||||
* capi_ctr_reseted() - signal CAPI controller reset
|
||||
* capi_ctr_down() - signal CAPI controller not ready
|
||||
* @card: controller descriptor structure.
|
||||
*
|
||||
* Called by hardware driver to signal that the controller is down and
|
||||
* unavailable for use.
|
||||
*/
|
||||
|
||||
void capi_ctr_reseted(struct capi_ctr * card)
|
||||
void capi_ctr_down(struct capi_ctr * card)
|
||||
{
|
||||
u16 appl;
|
||||
|
||||
|
@ -413,7 +413,7 @@ void capi_ctr_reseted(struct capi_ctr * card)
|
|||
notify_push(KCI_CONTRDOWN, card->cnr, 0, 0);
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(capi_ctr_reseted);
|
||||
EXPORT_SYMBOL(capi_ctr_down);
|
||||
|
||||
/**
|
||||
* capi_ctr_suspend_output() - suspend controller
|
||||
|
@ -517,7 +517,7 @@ EXPORT_SYMBOL(attach_capi_ctr);
|
|||
int detach_capi_ctr(struct capi_ctr *card)
|
||||
{
|
||||
if (card->cardstate != CARD_DETECTED)
|
||||
capi_ctr_reseted(card);
|
||||
capi_ctr_down(card);
|
||||
|
||||
ncards--;
|
||||
|
||||
|
|
|
@ -330,7 +330,7 @@ void b1_reset_ctr(struct capi_ctr *ctrl)
|
|||
spin_lock_irqsave(&card->lock, flags);
|
||||
capilib_release(&cinfo->ncci_head);
|
||||
spin_unlock_irqrestore(&card->lock, flags);
|
||||
capi_ctr_reseted(ctrl);
|
||||
capi_ctr_down(ctrl);
|
||||
}
|
||||
|
||||
void b1_register_appl(struct capi_ctr *ctrl,
|
||||
|
|
|
@ -759,7 +759,7 @@ void b1dma_reset_ctr(struct capi_ctr *ctrl)
|
|||
memset(cinfo->version, 0, sizeof(cinfo->version));
|
||||
capilib_release(&cinfo->ncci_head);
|
||||
spin_unlock_irqrestore(&card->lock, flags);
|
||||
capi_ctr_reseted(ctrl);
|
||||
capi_ctr_down(ctrl);
|
||||
}
|
||||
|
||||
/* ------------------------------------------------------------- */
|
||||
|
|
|
@ -681,7 +681,7 @@ static irqreturn_t c4_handle_interrupt(avmcard *card)
|
|||
spin_lock_irqsave(&card->lock, flags);
|
||||
capilib_release(&cinfo->ncci_head);
|
||||
spin_unlock_irqrestore(&card->lock, flags);
|
||||
capi_ctr_reseted(&cinfo->capi_ctrl);
|
||||
capi_ctr_down(&cinfo->capi_ctrl);
|
||||
}
|
||||
card->nlogcontr = 0;
|
||||
return IRQ_HANDLED;
|
||||
|
@ -909,7 +909,7 @@ static void c4_reset_ctr(struct capi_ctr *ctrl)
|
|||
for (i=0; i < card->nr_controllers; i++) {
|
||||
cinfo = &card->ctrlinfo[i];
|
||||
memset(cinfo->version, 0, sizeof(cinfo->version));
|
||||
capi_ctr_reseted(&cinfo->capi_ctrl);
|
||||
capi_ctr_down(&cinfo->capi_ctrl);
|
||||
}
|
||||
card->nlogcontr = 0;
|
||||
}
|
||||
|
|
|
@ -339,7 +339,7 @@ static void t1isa_reset_ctr(struct capi_ctr *ctrl)
|
|||
spin_lock_irqsave(&card->lock, flags);
|
||||
capilib_release(&cinfo->ncci_head);
|
||||
spin_unlock_irqrestore(&card->lock, flags);
|
||||
capi_ctr_reseted(ctrl);
|
||||
capi_ctr_down(ctrl);
|
||||
}
|
||||
|
||||
static void t1isa_remove(struct pci_dev *pdev)
|
||||
|
|
|
@ -67,7 +67,7 @@ hycapi_reset_ctr(struct capi_ctr *ctrl)
|
|||
printk(KERN_NOTICE "HYCAPI hycapi_reset_ctr\n");
|
||||
#endif
|
||||
capilib_release(&cinfo->ncci_head);
|
||||
capi_ctr_reseted(ctrl);
|
||||
capi_ctr_down(ctrl);
|
||||
}
|
||||
|
||||
/******************************
|
||||
|
@ -347,7 +347,7 @@ int hycapi_capi_stop(hysdn_card *card)
|
|||
if(cinfo) {
|
||||
ctrl = &cinfo->capi_ctrl;
|
||||
/* ctrl->suspend_output(ctrl); */
|
||||
capi_ctr_reseted(ctrl);
|
||||
capi_ctr_down(ctrl);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -480,9 +480,13 @@ static int pnp_registered;
|
|||
|
||||
#ifdef CONFIG_EISA
|
||||
static struct eisa_device_id el3_eisa_ids[] = {
|
||||
{ "TCM5090" },
|
||||
{ "TCM5091" },
|
||||
{ "TCM5092" },
|
||||
{ "TCM5093" },
|
||||
{ "TCM5094" },
|
||||
{ "TCM5095" },
|
||||
{ "TCM5098" },
|
||||
{ "" }
|
||||
};
|
||||
MODULE_DEVICE_TABLE(eisa, el3_eisa_ids);
|
||||
|
|
|
@ -1001,7 +1001,7 @@ config SMC911X
|
|||
|
||||
config SMSC911X
|
||||
tristate "SMSC LAN911x/LAN921x families embedded ethernet support"
|
||||
depends on ARM || SUPERH
|
||||
depends on ARM || SUPERH || BLACKFIN
|
||||
select CRC32
|
||||
select MII
|
||||
select PHYLIB
|
||||
|
@ -1723,6 +1723,11 @@ config TLAN
|
|||
|
||||
Please email feedback to <torben.mathiasen@compaq.com>.
|
||||
|
||||
config KS8842
|
||||
tristate "Micrel KSZ8842"
|
||||
help
|
||||
This platform driver is for Micrel KSZ8842 chip.
|
||||
|
||||
config VIA_RHINE
|
||||
tristate "VIA Rhine support"
|
||||
depends on NET_PCI && PCI
|
||||
|
@ -1859,8 +1864,8 @@ config 68360_ENET
|
|||
the Motorola 68360 processor.
|
||||
|
||||
config FEC
|
||||
bool "FEC ethernet controller (of ColdFire CPUs)"
|
||||
depends on M523x || M527x || M5272 || M528x || M520x || M532x || MACH_MX27
|
||||
bool "FEC ethernet controller (of ColdFire and some i.MX CPUs)"
|
||||
depends on M523x || M527x || M5272 || M528x || M520x || M532x || MACH_MX27 || ARCH_MX35
|
||||
help
|
||||
Say Y here if you want to use the built-in 10/100 Fast ethernet
|
||||
controller on some Motorola ColdFire and Freescale i.MX processors.
|
||||
|
@ -2720,6 +2725,8 @@ source "drivers/net/wan/Kconfig"
|
|||
|
||||
source "drivers/atm/Kconfig"
|
||||
|
||||
source "drivers/ieee802154/Kconfig"
|
||||
|
||||
source "drivers/s390/net/Kconfig"
|
||||
|
||||
config XEN_NETDEV_FRONTEND
|
||||
|
|
|
@ -86,6 +86,7 @@ obj-$(CONFIG_TC35815) += tc35815.o
|
|||
obj-$(CONFIG_SKGE) += skge.o
|
||||
obj-$(CONFIG_SKY2) += sky2.o
|
||||
obj-$(CONFIG_SKFP) += skfp/
|
||||
obj-$(CONFIG_KS8842) += ks8842.o
|
||||
obj-$(CONFIG_VIA_RHINE) += via-rhine.o
|
||||
obj-$(CONFIG_VIA_VELOCITY) += via-velocity.o
|
||||
obj-$(CONFIG_ADAPTEC_STARFIRE) += starfire.o
|
||||
|
@ -105,7 +106,7 @@ obj-$(CONFIG_HAMACHI) += hamachi.o
|
|||
obj-$(CONFIG_NET) += Space.o loopback.o
|
||||
obj-$(CONFIG_SEEQ8005) += seeq8005.o
|
||||
obj-$(CONFIG_NET_SB1000) += sb1000.o
|
||||
obj-$(CONFIG_MAC8390) += mac8390.o 8390.o
|
||||
obj-$(CONFIG_MAC8390) += mac8390.o
|
||||
obj-$(CONFIG_APNE) += apne.o 8390.o
|
||||
obj-$(CONFIG_PCMCIA_PCNET) += 8390.o
|
||||
obj-$(CONFIG_HP100) += hp100.o
|
||||
|
|
|
@ -2573,7 +2573,6 @@ restart:
|
|||
netif_wake_queue(dev);
|
||||
}
|
||||
|
||||
dev->trans_start = jiffies;
|
||||
return NETDEV_TX_OK;
|
||||
|
||||
overflow:
|
||||
|
|
|
@ -39,6 +39,7 @@
|
|||
static const char version[] = KERN_INFO "ipddp.c:v0.01 8/28/97 Bradford W. Johnson <johns393@maroon.tc.umn.edu>\n";
|
||||
|
||||
static struct ipddp_route *ipddp_route_list;
|
||||
static DEFINE_SPINLOCK(ipddp_route_lock);
|
||||
|
||||
#ifdef CONFIG_IPDDP_ENCAP
|
||||
static int ipddp_mode = IPDDP_ENCAP;
|
||||
|
@ -50,7 +51,7 @@ static int ipddp_mode = IPDDP_DECAP;
|
|||
static int ipddp_xmit(struct sk_buff *skb, struct net_device *dev);
|
||||
static int ipddp_create(struct ipddp_route *new_rt);
|
||||
static int ipddp_delete(struct ipddp_route *rt);
|
||||
static struct ipddp_route* ipddp_find_route(struct ipddp_route *rt);
|
||||
static struct ipddp_route* __ipddp_find_route(struct ipddp_route *rt);
|
||||
static int ipddp_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
|
||||
|
||||
static const struct net_device_ops ipddp_netdev_ops = {
|
||||
|
@ -114,11 +115,13 @@ static struct net_device * __init ipddp_init(void)
|
|||
*/
|
||||
static int ipddp_xmit(struct sk_buff *skb, struct net_device *dev)
|
||||
{
|
||||
__be32 paddr = ((struct rtable*)skb->dst)->rt_gateway;
|
||||
__be32 paddr = skb_rtable(skb)->rt_gateway;
|
||||
struct ddpehdr *ddp;
|
||||
struct ipddp_route *rt;
|
||||
struct atalk_addr *our_addr;
|
||||
|
||||
spin_lock(&ipddp_route_lock);
|
||||
|
||||
/*
|
||||
* Find appropriate route to use, based only on IP number.
|
||||
*/
|
||||
|
@ -127,8 +130,10 @@ static int ipddp_xmit(struct sk_buff *skb, struct net_device *dev)
|
|||
if(rt->ip == paddr)
|
||||
break;
|
||||
}
|
||||
if(rt == NULL)
|
||||
if(rt == NULL) {
|
||||
spin_unlock(&ipddp_route_lock);
|
||||
return 0;
|
||||
}
|
||||
|
||||
our_addr = atalk_find_dev_addr(rt->dev);
|
||||
|
||||
|
@ -174,6 +179,8 @@ static int ipddp_xmit(struct sk_buff *skb, struct net_device *dev)
|
|||
if(aarp_send_ddp(rt->dev, skb, &rt->at, NULL) < 0)
|
||||
dev_kfree_skb(skb);
|
||||
|
||||
spin_unlock(&ipddp_route_lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -196,7 +203,9 @@ static int ipddp_create(struct ipddp_route *new_rt)
|
|||
return -ENETUNREACH;
|
||||
}
|
||||
|
||||
if (ipddp_find_route(rt)) {
|
||||
spin_lock_bh(&ipddp_route_lock);
|
||||
if (__ipddp_find_route(rt)) {
|
||||
spin_unlock_bh(&ipddp_route_lock);
|
||||
kfree(rt);
|
||||
return -EEXIST;
|
||||
}
|
||||
|
@ -204,6 +213,8 @@ static int ipddp_create(struct ipddp_route *new_rt)
|
|||
rt->next = ipddp_route_list;
|
||||
ipddp_route_list = rt;
|
||||
|
||||
spin_unlock_bh(&ipddp_route_lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -216,6 +227,7 @@ static int ipddp_delete(struct ipddp_route *rt)
|
|||
struct ipddp_route **r = &ipddp_route_list;
|
||||
struct ipddp_route *tmp;
|
||||
|
||||
spin_lock_bh(&ipddp_route_lock);
|
||||
while((tmp = *r) != NULL)
|
||||
{
|
||||
if(tmp->ip == rt->ip
|
||||
|
@ -223,19 +235,21 @@ static int ipddp_delete(struct ipddp_route *rt)
|
|||
&& tmp->at.s_node == rt->at.s_node)
|
||||
{
|
||||
*r = tmp->next;
|
||||
spin_unlock_bh(&ipddp_route_lock);
|
||||
kfree(tmp);
|
||||
return 0;
|
||||
}
|
||||
r = &tmp->next;
|
||||
}
|
||||
|
||||
spin_unlock_bh(&ipddp_route_lock);
|
||||
return (-ENOENT);
|
||||
}
|
||||
|
||||
/*
|
||||
* Find a routing entry, we only return a FULL match
|
||||
*/
|
||||
static struct ipddp_route* ipddp_find_route(struct ipddp_route *rt)
|
||||
static struct ipddp_route* __ipddp_find_route(struct ipddp_route *rt)
|
||||
{
|
||||
struct ipddp_route *f;
|
||||
|
||||
|
@ -253,7 +267,7 @@ static struct ipddp_route* ipddp_find_route(struct ipddp_route *rt)
|
|||
static int ipddp_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
|
||||
{
|
||||
struct ipddp_route __user *rt = ifr->ifr_data;
|
||||
struct ipddp_route rcp;
|
||||
struct ipddp_route rcp, rcp2, *rp;
|
||||
|
||||
if(!capable(CAP_NET_ADMIN))
|
||||
return -EPERM;
|
||||
|
@ -267,9 +281,19 @@ static int ipddp_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
|
|||
return (ipddp_create(&rcp));
|
||||
|
||||
case SIOCFINDIPDDPRT:
|
||||
if(copy_to_user(rt, ipddp_find_route(&rcp), sizeof(struct ipddp_route)))
|
||||
return -EFAULT;
|
||||
return 0;
|
||||
spin_lock_bh(&ipddp_route_lock);
|
||||
rp = __ipddp_find_route(&rcp);
|
||||
if (rp)
|
||||
memcpy(&rcp2, rp, sizeof(rcp2));
|
||||
spin_unlock_bh(&ipddp_route_lock);
|
||||
|
||||
if (rp) {
|
||||
if (copy_to_user(rt, &rcp2,
|
||||
sizeof(struct ipddp_route)))
|
||||
return -EFAULT;
|
||||
return 0;
|
||||
} else
|
||||
return -ENOENT;
|
||||
|
||||
case SIOCDELIPDDPRT:
|
||||
return (ipddp_delete(&rcp));
|
||||
|
|
|
@ -253,7 +253,7 @@ static int ep93xx_rx(struct net_device *dev, int processed, int budget)
|
|||
skb = dev_alloc_skb(length + 2);
|
||||
if (likely(skb != NULL)) {
|
||||
skb_reserve(skb, 2);
|
||||
dma_sync_single(NULL, ep->descs->rdesc[entry].buf_addr,
|
||||
dma_sync_single_for_cpu(NULL, ep->descs->rdesc[entry].buf_addr,
|
||||
length, DMA_FROM_DEVICE);
|
||||
skb_copy_to_linear_data(skb, ep->rx_buf[entry], length);
|
||||
skb_put(skb, length);
|
||||
|
@ -331,7 +331,7 @@ static int ep93xx_xmit(struct sk_buff *skb, struct net_device *dev)
|
|||
ep->descs->tdesc[entry].tdesc1 =
|
||||
TDESC1_EOF | (entry << 16) | (skb->len & 0xfff);
|
||||
skb_copy_and_csum_dev(skb, ep->tx_buf[entry]);
|
||||
dma_sync_single(NULL, ep->descs->tdesc[entry].buf_addr,
|
||||
dma_sync_single_for_cpu(NULL, ep->descs->tdesc[entry].buf_addr,
|
||||
skb->len, DMA_TO_DEVICE);
|
||||
dev_kfree_skb(skb);
|
||||
|
||||
|
|
|
@ -561,8 +561,8 @@ static int eth_poll(struct napi_struct *napi, int budget)
|
|||
dma_unmap_single(&dev->dev, desc->data - NET_IP_ALIGN,
|
||||
RX_BUFF_SIZE, DMA_FROM_DEVICE);
|
||||
#else
|
||||
dma_sync_single(&dev->dev, desc->data - NET_IP_ALIGN,
|
||||
RX_BUFF_SIZE, DMA_FROM_DEVICE);
|
||||
dma_sync_single_for_cpu(&dev->dev, desc->data - NET_IP_ALIGN,
|
||||
RX_BUFF_SIZE, DMA_FROM_DEVICE);
|
||||
memcpy_swab32((u32 *)skb->data, (u32 *)port->rx_buff_tab[n],
|
||||
ALIGN(NET_IP_ALIGN + desc->pkt_len, 4) / 4);
|
||||
#endif
|
||||
|
|
|
@ -271,7 +271,7 @@ static int atl1c_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
|
|||
struct atl1c_adapter *adapter = netdev_priv(netdev);
|
||||
|
||||
if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE |
|
||||
WAKE_MCAST | WAKE_BCAST | WAKE_MCAST))
|
||||
WAKE_UCAST | WAKE_BCAST | WAKE_MCAST))
|
||||
return -EOPNOTSUPP;
|
||||
/* these settings will always override what we currently have */
|
||||
adapter->wol = 0;
|
||||
|
|
|
@ -163,6 +163,24 @@ static inline void atl1c_irq_reset(struct atl1c_adapter *adapter)
|
|||
atl1c_irq_enable(adapter);
|
||||
}
|
||||
|
||||
/*
|
||||
* atl1c_wait_until_idle - wait up to AT_HW_MAX_IDLE_DELAY reads
|
||||
* of the idle status register until the device is actually idle
|
||||
*/
|
||||
static u32 atl1c_wait_until_idle(struct atl1c_hw *hw)
|
||||
{
|
||||
int timeout;
|
||||
u32 data;
|
||||
|
||||
for (timeout = 0; timeout < AT_HW_MAX_IDLE_DELAY; timeout++) {
|
||||
AT_READ_REG(hw, REG_IDLE_STATUS, &data);
|
||||
if ((data & IDLE_STATUS_MASK) == 0)
|
||||
return 0;
|
||||
msleep(1);
|
||||
}
|
||||
return data;
|
||||
}
|
||||
|
||||
/*
|
||||
* atl1c_phy_config - Timer Call-back
|
||||
* @data: pointer to netdev cast into an unsigned long
|
||||
|
@ -1106,7 +1124,6 @@ static void atl1c_configure_dma(struct atl1c_adapter *adapter)
|
|||
static int atl1c_stop_mac(struct atl1c_hw *hw)
|
||||
{
|
||||
u32 data;
|
||||
int timeout;
|
||||
|
||||
AT_READ_REG(hw, REG_RXQ_CTRL, &data);
|
||||
data &= ~(RXQ1_CTRL_EN | RXQ2_CTRL_EN |
|
||||
|
@ -1117,25 +1134,13 @@ static int atl1c_stop_mac(struct atl1c_hw *hw)
|
|||
data &= ~TXQ_CTRL_EN;
|
||||
AT_WRITE_REG(hw, REG_TWSI_CTRL, data);
|
||||
|
||||
for (timeout = 0; timeout < AT_HW_MAX_IDLE_DELAY; timeout++) {
|
||||
AT_READ_REG(hw, REG_IDLE_STATUS, &data);
|
||||
if ((data & (IDLE_STATUS_RXQ_NO_IDLE |
|
||||
IDLE_STATUS_TXQ_NO_IDLE)) == 0)
|
||||
break;
|
||||
msleep(1);
|
||||
}
|
||||
atl1c_wait_until_idle(hw);
|
||||
|
||||
AT_READ_REG(hw, REG_MAC_CTRL, &data);
|
||||
data &= ~(MAC_CTRL_TX_EN | MAC_CTRL_RX_EN);
|
||||
AT_WRITE_REG(hw, REG_MAC_CTRL, data);
|
||||
|
||||
for (timeout = 0; timeout < AT_HW_MAX_IDLE_DELAY; timeout++) {
|
||||
AT_READ_REG(hw, REG_IDLE_STATUS, &data);
|
||||
if ((data & IDLE_STATUS_MASK) == 0)
|
||||
return 0;
|
||||
msleep(1);
|
||||
}
|
||||
return data;
|
||||
return (int)atl1c_wait_until_idle(hw);
|
||||
}
|
||||
|
||||
static void atl1c_enable_rx_ctrl(struct atl1c_hw *hw)
|
||||
|
@ -1178,8 +1183,6 @@ static int atl1c_reset_mac(struct atl1c_hw *hw)
|
|||
{
|
||||
struct atl1c_adapter *adapter = (struct atl1c_adapter *)hw->adapter;
|
||||
struct pci_dev *pdev = adapter->pdev;
|
||||
u32 idle_status_data = 0;
|
||||
int timeout = 0;
|
||||
int ret;
|
||||
|
||||
AT_WRITE_REG(hw, REG_IMR, 0);
|
||||
|
@ -1198,15 +1201,10 @@ static int atl1c_reset_mac(struct atl1c_hw *hw)
|
|||
AT_WRITE_FLUSH(hw);
|
||||
msleep(10);
|
||||
/* Wait at least 10ms for All module to be Idle */
|
||||
for (timeout = 0; timeout < AT_HW_MAX_IDLE_DELAY; timeout++) {
|
||||
AT_READ_REG(hw, REG_IDLE_STATUS, &idle_status_data);
|
||||
if ((idle_status_data & IDLE_STATUS_MASK) == 0)
|
||||
break;
|
||||
msleep(1);
|
||||
}
|
||||
if (timeout >= AT_HW_MAX_IDLE_DELAY) {
|
||||
|
||||
if (atl1c_wait_until_idle(hw)) {
|
||||
dev_err(&pdev->dev,
|
||||
"MAC state machine cann't be idle since"
|
||||
"MAC state machine can't be idle since"
|
||||
" disabled for 10ms second\n");
|
||||
return -1;
|
||||
}
|
||||
|
@ -2113,7 +2111,6 @@ static int atl1c_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
|
|||
atl1c_tx_map(adapter, skb, tpd, type);
|
||||
atl1c_tx_queue(adapter, skb, tpd, type);
|
||||
|
||||
netdev->trans_start = jiffies;
|
||||
spin_unlock_irqrestore(&adapter->tx_lock, flags);
|
||||
return NETDEV_TX_OK;
|
||||
}
|
||||
|
|
|
@ -37,6 +37,7 @@ char atl1e_driver_version[] = DRV_VERSION;
|
|||
*/
|
||||
static struct pci_device_id atl1e_pci_tbl[] = {
|
||||
{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1E)},
|
||||
{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, 0x1066)},
|
||||
/* required last entry */
|
||||
{ 0 }
|
||||
};
|
||||
|
@ -1893,7 +1894,7 @@ static int atl1e_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
|
|||
atl1e_tx_map(adapter, skb, tpd);
|
||||
atl1e_tx_queue(adapter, tpd_req, tpd);
|
||||
|
||||
netdev->trans_start = jiffies;
|
||||
netdev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
|
||||
spin_unlock_irqrestore(&adapter->tx_lock, flags);
|
||||
return NETDEV_TX_OK;
|
||||
}
|
||||
|
|
|
@ -82,6 +82,12 @@
|
|||
|
||||
#include "atl1.h"
|
||||
|
||||
#define ATLX_DRIVER_VERSION "2.1.3"
|
||||
MODULE_AUTHOR("Xiong Huang <xiong.huang@atheros.com>, \
|
||||
Chris Snook <csnook@redhat.com>, Jay Cliburn <jcliburn@gmail.com>");
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_VERSION(ATLX_DRIVER_VERSION);
|
||||
|
||||
/* Temporary hack for merging atl1 and atl2 */
|
||||
#include "atlx.c"
|
||||
|
||||
|
@ -2431,7 +2437,6 @@ static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
|
|||
atl1_tx_queue(adapter, count, ptpd);
|
||||
atl1_update_mailbox(adapter);
|
||||
mmiowb();
|
||||
netdev->trans_start = jiffies;
|
||||
return NETDEV_TX_OK;
|
||||
}
|
||||
|
||||
|
|
|
@ -29,12 +29,6 @@
|
|||
#include <linux/module.h>
|
||||
#include <linux/types.h>
|
||||
|
||||
#define ATLX_DRIVER_VERSION "2.1.3"
|
||||
MODULE_AUTHOR("Xiong Huang <xiong.huang@atheros.com>, \
|
||||
Chris Snook <csnook@redhat.com>, Jay Cliburn <jcliburn@gmail.com>");
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_VERSION(ATLX_DRIVER_VERSION);
|
||||
|
||||
#define ATLX_ERR_PHY 2
|
||||
#define ATLX_ERR_PHY_SPEED 7
|
||||
#define ATLX_ERR_PHY_RES 8
|
||||
|
|
|
@ -782,7 +782,7 @@ static int b44_rx(struct b44 *bp, int budget)
|
|||
drop_it:
|
||||
b44_recycle_rx(bp, cons, bp->rx_prod);
|
||||
drop_it_no_recycle:
|
||||
bp->stats.rx_dropped++;
|
||||
bp->dev->stats.rx_dropped++;
|
||||
goto next_pkt;
|
||||
}
|
||||
|
||||
|
@ -1647,7 +1647,7 @@ static int b44_close(struct net_device *dev)
|
|||
static struct net_device_stats *b44_get_stats(struct net_device *dev)
|
||||
{
|
||||
struct b44 *bp = netdev_priv(dev);
|
||||
struct net_device_stats *nstat = &bp->stats;
|
||||
struct net_device_stats *nstat = &dev->stats;
|
||||
struct b44_hw_stats *hwstat = &bp->hw_stats;
|
||||
|
||||
/* Convert HW stats into netdevice stats. */
|
||||
|
|
|
@ -384,7 +384,6 @@ struct b44 {
|
|||
|
||||
struct timer_list timer;
|
||||
|
||||
struct net_device_stats stats;
|
||||
struct b44_hw_stats hw_stats;
|
||||
|
||||
struct ssb_device *sdev;
|
||||
|
|
|
@ -168,6 +168,7 @@ static void netdev_stats_update(struct be_adapter *adapter)
|
|||
struct be_port_rxf_stats *port_stats =
|
||||
&rxf_stats->port[adapter->port_num];
|
||||
struct net_device_stats *dev_stats = &adapter->stats.net_stats;
|
||||
struct be_erx_stats *erx_stats = &hw_stats->erx;
|
||||
|
||||
dev_stats->rx_packets = port_stats->rx_total_frames;
|
||||
dev_stats->tx_packets = port_stats->tx_unicastframes +
|
||||
|
@ -181,29 +182,33 @@ static void netdev_stats_update(struct be_adapter *adapter)
|
|||
dev_stats->rx_errors = port_stats->rx_crc_errors +
|
||||
port_stats->rx_alignment_symbol_errors +
|
||||
port_stats->rx_in_range_errors +
|
||||
port_stats->rx_out_range_errors + port_stats->rx_frame_too_long;
|
||||
port_stats->rx_out_range_errors +
|
||||
port_stats->rx_frame_too_long +
|
||||
port_stats->rx_dropped_too_small +
|
||||
port_stats->rx_dropped_too_short +
|
||||
port_stats->rx_dropped_header_too_small +
|
||||
port_stats->rx_dropped_tcp_length +
|
||||
port_stats->rx_dropped_runt +
|
||||
port_stats->rx_tcp_checksum_errs +
|
||||
port_stats->rx_ip_checksum_errs +
|
||||
port_stats->rx_udp_checksum_errs;
|
||||
|
||||
/* packet transmit problems */
|
||||
dev_stats->tx_errors = 0;
|
||||
|
||||
/* no space in linux buffers */
|
||||
dev_stats->rx_dropped = 0;
|
||||
|
||||
/* no space available in linux */
|
||||
dev_stats->tx_dropped = 0;
|
||||
|
||||
dev_stats->multicast = port_stats->tx_multicastframes;
|
||||
dev_stats->collisions = 0;
|
||||
/* no space in linux buffers: best possible approximation */
|
||||
dev_stats->rx_dropped = erx_stats->rx_drops_no_fragments[0];
|
||||
|
||||
/* detailed rx errors */
|
||||
dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
|
||||
port_stats->rx_out_range_errors + port_stats->rx_frame_too_long;
|
||||
port_stats->rx_out_range_errors +
|
||||
port_stats->rx_frame_too_long;
|
||||
|
||||
/* receive ring buffer overflow */
|
||||
dev_stats->rx_over_errors = 0;
|
||||
|
||||
dev_stats->rx_crc_errors = port_stats->rx_crc_errors;
|
||||
|
||||
/* frame alignment errors */
|
||||
dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors;
|
||||
|
||||
/* receiver fifo overrun */
|
||||
/* drops_no_pbuf is no per i/f, it's per BE card */
|
||||
dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
|
||||
|
@ -211,6 +216,16 @@ static void netdev_stats_update(struct be_adapter *adapter)
|
|||
rxf_stats->rx_drops_no_pbuf;
|
||||
/* receiver missed packetd */
|
||||
dev_stats->rx_missed_errors = 0;
|
||||
|
||||
/* packet transmit problems */
|
||||
dev_stats->tx_errors = 0;
|
||||
|
||||
/* no space available in linux */
|
||||
dev_stats->tx_dropped = 0;
|
||||
|
||||
dev_stats->multicast = port_stats->tx_multicastframes;
|
||||
dev_stats->collisions = 0;
|
||||
|
||||
/* detailed tx_errors */
|
||||
dev_stats->tx_aborted_errors = 0;
|
||||
dev_stats->tx_carrier_errors = 0;
|
||||
|
@ -337,13 +352,10 @@ static void be_tx_stats_update(struct be_adapter *adapter,
|
|||
/* Determine number of WRB entries needed to xmit data in an skb */
|
||||
static u32 wrb_cnt_for_skb(struct sk_buff *skb, bool *dummy)
|
||||
{
|
||||
int cnt = 0;
|
||||
while (skb) {
|
||||
if (skb->len > skb->data_len)
|
||||
cnt++;
|
||||
cnt += skb_shinfo(skb)->nr_frags;
|
||||
skb = skb_shinfo(skb)->frag_list;
|
||||
}
|
||||
int cnt = (skb->len > skb->data_len);
|
||||
|
||||
cnt += skb_shinfo(skb)->nr_frags;
|
||||
|
||||
/* to account for hdr wrb */
|
||||
cnt++;
|
||||
if (cnt & 1) {
|
||||
|
@ -409,31 +421,28 @@ static int make_tx_wrbs(struct be_adapter *adapter,
|
|||
hdr = queue_head_node(txq);
|
||||
queue_head_inc(txq);
|
||||
|
||||
while (skb) {
|
||||
if (skb->len > skb->data_len) {
|
||||
int len = skb->len - skb->data_len;
|
||||
busaddr = pci_map_single(pdev, skb->data, len,
|
||||
PCI_DMA_TODEVICE);
|
||||
wrb = queue_head_node(txq);
|
||||
wrb_fill(wrb, busaddr, len);
|
||||
be_dws_cpu_to_le(wrb, sizeof(*wrb));
|
||||
queue_head_inc(txq);
|
||||
copied += len;
|
||||
}
|
||||
if (skb->len > skb->data_len) {
|
||||
int len = skb->len - skb->data_len;
|
||||
busaddr = pci_map_single(pdev, skb->data, len,
|
||||
PCI_DMA_TODEVICE);
|
||||
wrb = queue_head_node(txq);
|
||||
wrb_fill(wrb, busaddr, len);
|
||||
be_dws_cpu_to_le(wrb, sizeof(*wrb));
|
||||
queue_head_inc(txq);
|
||||
copied += len;
|
||||
}
|
||||
|
||||
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
|
||||
struct skb_frag_struct *frag =
|
||||
&skb_shinfo(skb)->frags[i];
|
||||
busaddr = pci_map_page(pdev, frag->page,
|
||||
frag->page_offset,
|
||||
frag->size, PCI_DMA_TODEVICE);
|
||||
wrb = queue_head_node(txq);
|
||||
wrb_fill(wrb, busaddr, frag->size);
|
||||
be_dws_cpu_to_le(wrb, sizeof(*wrb));
|
||||
queue_head_inc(txq);
|
||||
copied += frag->size;
|
||||
}
|
||||
skb = skb_shinfo(skb)->frag_list;
|
||||
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
|
||||
struct skb_frag_struct *frag =
|
||||
&skb_shinfo(skb)->frags[i];
|
||||
busaddr = pci_map_page(pdev, frag->page,
|
||||
frag->page_offset,
|
||||
frag->size, PCI_DMA_TODEVICE);
|
||||
wrb = queue_head_node(txq);
|
||||
wrb_fill(wrb, busaddr, frag->size);
|
||||
be_dws_cpu_to_le(wrb, sizeof(*wrb));
|
||||
queue_head_inc(txq);
|
||||
copied += frag->size;
|
||||
}
|
||||
|
||||
if (dummy_wrb) {
|
||||
|
@ -478,8 +487,6 @@ static int be_xmit(struct sk_buff *skb, struct net_device *netdev)
|
|||
|
||||
be_txq_notify(&adapter->ctrl, txq->id, wrb_cnt);
|
||||
|
||||
netdev->trans_start = jiffies;
|
||||
|
||||
be_tx_stats_update(adapter, wrb_cnt, copied, stopped);
|
||||
return NETDEV_TX_OK;
|
||||
}
|
||||
|
@ -736,7 +743,7 @@ static void skb_fill_rx_data(struct be_adapter *adapter,
|
|||
|
||||
if (pktsize <= rx_frag_size) {
|
||||
BUG_ON(num_rcvd != 1);
|
||||
return;
|
||||
goto done;
|
||||
}
|
||||
|
||||
/* More frags present for this completion */
|
||||
|
@ -758,6 +765,7 @@ static void skb_fill_rx_data(struct be_adapter *adapter,
|
|||
memset(page_info, 0, sizeof(*page_info));
|
||||
}
|
||||
|
||||
done:
|
||||
be_rx_stats_update(adapter, pktsize, num_rcvd);
|
||||
return;
|
||||
}
|
||||
|
@ -868,12 +876,19 @@ static struct be_eth_rx_compl *be_rx_compl_get(struct be_adapter *adapter)
|
|||
|
||||
be_dws_le_to_cpu(rxcp, sizeof(*rxcp));
|
||||
|
||||
rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] = 0;
|
||||
|
||||
queue_tail_inc(&adapter->rx_obj.cq);
|
||||
return rxcp;
|
||||
}
|
||||
|
||||
/* To reset the valid bit, we need to reset the whole word as
|
||||
* when walking the queue the valid entries are little-endian
|
||||
* and invalid entries are host endian
|
||||
*/
|
||||
static inline void be_rx_compl_reset(struct be_eth_rx_compl *rxcp)
|
||||
{
|
||||
rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] = 0;
|
||||
}
|
||||
|
||||
static inline struct page *be_alloc_pages(u32 size)
|
||||
{
|
||||
gfp_t alloc_flags = GFP_ATOMIC;
|
||||
|
@ -1005,6 +1020,7 @@ static void be_rx_q_clean(struct be_adapter *adapter)
|
|||
/* First cleanup pending rx completions */
|
||||
while ((rxcp = be_rx_compl_get(adapter)) != NULL) {
|
||||
be_rx_compl_discard(adapter, rxcp);
|
||||
be_rx_compl_reset(rxcp);
|
||||
be_cq_notify(&adapter->ctrl, rx_cq->id, true, 1);
|
||||
}
|
||||
|
||||
|
@ -1040,8 +1056,13 @@ static void be_tx_queues_destroy(struct be_adapter *adapter)
|
|||
struct be_queue_info *q;
|
||||
|
||||
q = &adapter->tx_obj.q;
|
||||
if (q->created)
|
||||
if (q->created) {
|
||||
be_cmd_q_destroy(&adapter->ctrl, q, QTYPE_TXQ);
|
||||
|
||||
/* No more tx completions can be rcvd now; clean up if there
|
||||
* are any pending completions or pending tx requests */
|
||||
be_tx_q_clean(adapter);
|
||||
}
|
||||
be_queue_free(adapter, q);
|
||||
|
||||
q = &adapter->tx_obj.cq;
|
||||
|
@ -1049,10 +1070,6 @@ static void be_tx_queues_destroy(struct be_adapter *adapter)
|
|||
be_cmd_q_destroy(&adapter->ctrl, q, QTYPE_CQ);
|
||||
be_queue_free(adapter, q);
|
||||
|
||||
/* No more tx completions can be rcvd now; clean up if there are
|
||||
* any pending completions or pending tx requests */
|
||||
be_tx_q_clean(adapter);
|
||||
|
||||
q = &adapter->tx_eq.q;
|
||||
if (q->created)
|
||||
be_cmd_q_destroy(&adapter->ctrl, q, QTYPE_EQ);
|
||||
|
@ -1286,6 +1303,8 @@ int be_poll_rx(struct napi_struct *napi, int budget)
|
|||
be_rx_compl_process_lro(adapter, rxcp);
|
||||
else
|
||||
be_rx_compl_process(adapter, rxcp);
|
||||
|
||||
be_rx_compl_reset(rxcp);
|
||||
}
|
||||
|
||||
lro_flush_all(&adapter->rx_obj.lro_mgr);
|
||||
|
@ -1541,7 +1560,7 @@ static int be_close(struct net_device *netdev)
|
|||
struct be_eq_obj *tx_eq = &adapter->tx_eq;
|
||||
int vec;
|
||||
|
||||
cancel_delayed_work(&adapter->work);
|
||||
cancel_delayed_work_sync(&adapter->work);
|
||||
|
||||
netif_stop_queue(netdev);
|
||||
netif_carrier_off(netdev);
|
||||
|
|
|
@ -194,13 +194,13 @@ static int desc_list_init(void)
|
|||
struct dma_descriptor *b = &(r->desc_b);
|
||||
|
||||
/* allocate a new skb for next time receive */
|
||||
new_skb = dev_alloc_skb(PKT_BUF_SZ + 2);
|
||||
new_skb = dev_alloc_skb(PKT_BUF_SZ + NET_IP_ALIGN);
|
||||
if (!new_skb) {
|
||||
printk(KERN_NOTICE DRV_NAME
|
||||
": init: low on mem - packet dropped\n");
|
||||
goto init_error;
|
||||
}
|
||||
skb_reserve(new_skb, 2);
|
||||
skb_reserve(new_skb, NET_IP_ALIGN);
|
||||
r->skb = new_skb;
|
||||
|
||||
/*
|
||||
|
@ -566,9 +566,9 @@ static void adjust_tx_list(void)
|
|||
*/
|
||||
if (current_tx_ptr->next->next == tx_list_head) {
|
||||
while (tx_list_head->status.status_word == 0) {
|
||||
mdelay(1);
|
||||
udelay(10);
|
||||
if (tx_list_head->status.status_word != 0
|
||||
|| !(bfin_read_DMA2_IRQ_STATUS() & 0x08)) {
|
||||
|| !(bfin_read_DMA2_IRQ_STATUS() & DMA_RUN)) {
|
||||
goto adjust_head;
|
||||
}
|
||||
if (timeout_cnt-- < 0) {
|
||||
|
@ -606,93 +606,41 @@ static int bfin_mac_hard_start_xmit(struct sk_buff *skb,
|
|||
struct net_device *dev)
|
||||
{
|
||||
u16 *data;
|
||||
|
||||
u32 data_align = (unsigned long)(skb->data) & 0x3;
|
||||
current_tx_ptr->skb = skb;
|
||||
|
||||
if (ANOMALY_05000285) {
|
||||
/*
|
||||
* TXDWA feature is not avaible to older revision < 0.3 silicon
|
||||
* of BF537
|
||||
*
|
||||
* Only if data buffer is ODD WORD alignment, we do not
|
||||
* need to memcpy
|
||||
*/
|
||||
u32 data_align = (u32)(skb->data) & 0x3;
|
||||
if (data_align == 0x2) {
|
||||
/* move skb->data to current_tx_ptr payload */
|
||||
data = (u16 *)(skb->data) - 1;
|
||||
*data = (u16)(skb->len);
|
||||
current_tx_ptr->desc_a.start_addr = (u32)data;
|
||||
/* this is important! */
|
||||
blackfin_dcache_flush_range((u32)data,
|
||||
(u32)((u8 *)data + skb->len + 4));
|
||||
} else {
|
||||
*((u16 *)(current_tx_ptr->packet)) = (u16)(skb->len);
|
||||
memcpy((u8 *)(current_tx_ptr->packet + 2), skb->data,
|
||||
skb->len);
|
||||
current_tx_ptr->desc_a.start_addr =
|
||||
(u32)current_tx_ptr->packet;
|
||||
if (current_tx_ptr->status.status_word != 0)
|
||||
current_tx_ptr->status.status_word = 0;
|
||||
blackfin_dcache_flush_range(
|
||||
(u32)current_tx_ptr->packet,
|
||||
(u32)(current_tx_ptr->packet + skb->len + 2));
|
||||
}
|
||||
if (data_align == 0x2) {
|
||||
/* move skb->data to current_tx_ptr payload */
|
||||
data = (u16 *)(skb->data) - 1;
|
||||
*data = (u16)(skb->len);
|
||||
current_tx_ptr->desc_a.start_addr = (u32)data;
|
||||
/* this is important! */
|
||||
blackfin_dcache_flush_range((u32)data,
|
||||
(u32)((u8 *)data + skb->len + 4));
|
||||
} else {
|
||||
/*
|
||||
* TXDWA feature is avaible to revision < 0.3 silicon of
|
||||
* BF537 and always avaible to BF52x
|
||||
*/
|
||||
u32 data_align = (u32)(skb->data) & 0x3;
|
||||
if (data_align == 0x0) {
|
||||
u16 sysctl = bfin_read_EMAC_SYSCTL();
|
||||
sysctl |= TXDWA;
|
||||
bfin_write_EMAC_SYSCTL(sysctl);
|
||||
|
||||
/* move skb->data to current_tx_ptr payload */
|
||||
data = (u16 *)(skb->data) - 2;
|
||||
*data = (u16)(skb->len);
|
||||
current_tx_ptr->desc_a.start_addr = (u32)data;
|
||||
/* this is important! */
|
||||
blackfin_dcache_flush_range(
|
||||
(u32)data,
|
||||
(u32)((u8 *)data + skb->len + 4));
|
||||
} else if (data_align == 0x2) {
|
||||
u16 sysctl = bfin_read_EMAC_SYSCTL();
|
||||
sysctl &= ~TXDWA;
|
||||
bfin_write_EMAC_SYSCTL(sysctl);
|
||||
|
||||
/* move skb->data to current_tx_ptr payload */
|
||||
data = (u16 *)(skb->data) - 1;
|
||||
*data = (u16)(skb->len);
|
||||
current_tx_ptr->desc_a.start_addr = (u32)data;
|
||||
/* this is important! */
|
||||
blackfin_dcache_flush_range(
|
||||
(u32)data,
|
||||
(u32)((u8 *)data + skb->len + 4));
|
||||
} else {
|
||||
u16 sysctl = bfin_read_EMAC_SYSCTL();
|
||||
sysctl &= ~TXDWA;
|
||||
bfin_write_EMAC_SYSCTL(sysctl);
|
||||
|
||||
*((u16 *)(current_tx_ptr->packet)) = (u16)(skb->len);
|
||||
memcpy((u8 *)(current_tx_ptr->packet + 2), skb->data,
|
||||
skb->len);
|
||||
current_tx_ptr->desc_a.start_addr =
|
||||
(u32)current_tx_ptr->packet;
|
||||
if (current_tx_ptr->status.status_word != 0)
|
||||
current_tx_ptr->status.status_word = 0;
|
||||
blackfin_dcache_flush_range(
|
||||
(u32)current_tx_ptr->packet,
|
||||
(u32)(current_tx_ptr->packet + skb->len + 2));
|
||||
}
|
||||
*((u16 *)(current_tx_ptr->packet)) = (u16)(skb->len);
|
||||
memcpy((u8 *)(current_tx_ptr->packet + 2), skb->data,
|
||||
skb->len);
|
||||
current_tx_ptr->desc_a.start_addr =
|
||||
(u32)current_tx_ptr->packet;
|
||||
if (current_tx_ptr->status.status_word != 0)
|
||||
current_tx_ptr->status.status_word = 0;
|
||||
blackfin_dcache_flush_range(
|
||||
(u32)current_tx_ptr->packet,
|
||||
(u32)(current_tx_ptr->packet + skb->len + 2));
|
||||
}
|
||||
|
||||
/* make sure the internal data buffers in the core are drained
|
||||
* so that the DMA descriptors are completely written when the
|
||||
* DMA engine goes to fetch them below
|
||||
*/
|
||||
SSYNC();
|
||||
|
||||
/* enable this packet's dma */
|
||||
current_tx_ptr->desc_a.config |= DMAEN;
|
||||
|
||||
/* tx dma is running, just return */
|
||||
if (bfin_read_DMA2_IRQ_STATUS() & 0x08)
|
||||
if (bfin_read_DMA2_IRQ_STATUS() & DMA_RUN)
|
||||
goto out;
|
||||
|
||||
/* tx dma is not running */
|
||||
|
@ -718,7 +666,7 @@ static void bfin_mac_rx(struct net_device *dev)
|
|||
|
||||
/* allocate a new skb for next time receive */
|
||||
skb = current_rx_ptr->skb;
|
||||
new_skb = dev_alloc_skb(PKT_BUF_SZ + 2);
|
||||
new_skb = dev_alloc_skb(PKT_BUF_SZ + NET_IP_ALIGN);
|
||||
if (!new_skb) {
|
||||
printk(KERN_NOTICE DRV_NAME
|
||||
": rx: low on mem - packet dropped\n");
|
||||
|
@ -726,7 +674,7 @@ static void bfin_mac_rx(struct net_device *dev)
|
|||
goto out;
|
||||
}
|
||||
/* reserve 2 bytes for RXDWA padding */
|
||||
skb_reserve(new_skb, 2);
|
||||
skb_reserve(new_skb, NET_IP_ALIGN);
|
||||
current_rx_ptr->skb = new_skb;
|
||||
current_rx_ptr->desc_a.start_addr = (unsigned long)new_skb->data - 2;
|
||||
|
||||
|
@ -979,22 +927,7 @@ static int bfin_mac_open(struct net_device *dev)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static const struct net_device_ops bfin_mac_netdev_ops = {
|
||||
.ndo_open = bfin_mac_open,
|
||||
.ndo_stop = bfin_mac_close,
|
||||
.ndo_start_xmit = bfin_mac_hard_start_xmit,
|
||||
.ndo_set_mac_address = bfin_mac_set_mac_address,
|
||||
.ndo_tx_timeout = bfin_mac_timeout,
|
||||
.ndo_set_multicast_list = bfin_mac_set_multicast_list,
|
||||
.ndo_validate_addr = eth_validate_addr,
|
||||
.ndo_change_mtu = eth_change_mtu,
|
||||
#ifdef CONFIG_NET_POLL_CONTROLLER
|
||||
.ndo_poll_controller = bfin_mac_poll,
|
||||
#endif
|
||||
};
|
||||
|
||||
/*
|
||||
*
|
||||
* this makes the board clean up everything that it can
|
||||
* and not talk to the outside world. Caused by
|
||||
* an 'ifconfig ethX down'
|
||||
|
@ -1019,11 +952,26 @@ static int bfin_mac_close(struct net_device *dev)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static const struct net_device_ops bfin_mac_netdev_ops = {
|
||||
.ndo_open = bfin_mac_open,
|
||||
.ndo_stop = bfin_mac_close,
|
||||
.ndo_start_xmit = bfin_mac_hard_start_xmit,
|
||||
.ndo_set_mac_address = bfin_mac_set_mac_address,
|
||||
.ndo_tx_timeout = bfin_mac_timeout,
|
||||
.ndo_set_multicast_list = bfin_mac_set_multicast_list,
|
||||
.ndo_validate_addr = eth_validate_addr,
|
||||
.ndo_change_mtu = eth_change_mtu,
|
||||
#ifdef CONFIG_NET_POLL_CONTROLLER
|
||||
.ndo_poll_controller = bfin_mac_poll,
|
||||
#endif
|
||||
};
|
||||
|
||||
static int __devinit bfin_mac_probe(struct platform_device *pdev)
|
||||
{
|
||||
struct net_device *ndev;
|
||||
struct bfin_mac_local *lp;
|
||||
int rc, i;
|
||||
struct platform_device *pd;
|
||||
int rc;
|
||||
|
||||
ndev = alloc_etherdev(sizeof(struct bfin_mac_local));
|
||||
if (!ndev) {
|
||||
|
@ -1048,13 +996,6 @@ static int __devinit bfin_mac_probe(struct platform_device *pdev)
|
|||
goto out_err_probe_mac;
|
||||
}
|
||||
|
||||
/* set the GPIO pins to Ethernet mode */
|
||||
rc = peripheral_request_list(pin_req, DRV_NAME);
|
||||
if (rc) {
|
||||
dev_err(&pdev->dev, "Requesting peripherals failed!\n");
|
||||
rc = -EFAULT;
|
||||
goto out_err_setup_pin_mux;
|
||||
}
|
||||
|
||||
/*
|
||||
* Is it valid? (Did bootloader initialize it?)
|
||||
|
@ -1070,26 +1011,14 @@ static int __devinit bfin_mac_probe(struct platform_device *pdev)
|
|||
|
||||
setup_mac_addr(ndev->dev_addr);
|
||||
|
||||
/* MDIO bus initialization */
|
||||
lp->mii_bus = mdiobus_alloc();
|
||||
if (lp->mii_bus == NULL)
|
||||
goto out_err_mdiobus_alloc;
|
||||
|
||||
lp->mii_bus->priv = ndev;
|
||||
lp->mii_bus->read = bfin_mdiobus_read;
|
||||
lp->mii_bus->write = bfin_mdiobus_write;
|
||||
lp->mii_bus->reset = bfin_mdiobus_reset;
|
||||
lp->mii_bus->name = "bfin_mac_mdio";
|
||||
snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "0");
|
||||
lp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
|
||||
for (i = 0; i < PHY_MAX_ADDR; ++i)
|
||||
lp->mii_bus->irq[i] = PHY_POLL;
|
||||
|
||||
rc = mdiobus_register(lp->mii_bus);
|
||||
if (rc) {
|
||||
dev_err(&pdev->dev, "Cannot register MDIO bus!\n");
|
||||
goto out_err_mdiobus_register;
|
||||
if (!pdev->dev.platform_data) {
|
||||
dev_err(&pdev->dev, "Cannot get platform device bfin_mii_bus!\n");
|
||||
rc = -ENODEV;
|
||||
goto out_err_probe_mac;
|
||||
}
|
||||
pd = pdev->dev.platform_data;
|
||||
lp->mii_bus = platform_get_drvdata(pd);
|
||||
lp->mii_bus->priv = ndev;
|
||||
|
||||
rc = mii_probe(ndev);
|
||||
if (rc) {
|
||||
|
@ -1108,7 +1037,7 @@ static int __devinit bfin_mac_probe(struct platform_device *pdev)
|
|||
/* now, enable interrupts */
|
||||
/* register irq handler */
|
||||
rc = request_irq(IRQ_MAC_RX, bfin_mac_interrupt,
|
||||
IRQF_DISABLED | IRQF_SHARED, "EMAC_RX", ndev);
|
||||
IRQF_DISABLED, "EMAC_RX", ndev);
|
||||
if (rc) {
|
||||
dev_err(&pdev->dev, "Cannot request Blackfin MAC RX IRQ!\n");
|
||||
rc = -EBUSY;
|
||||
|
@ -1131,11 +1060,8 @@ out_err_reg_ndev:
|
|||
out_err_request_irq:
|
||||
out_err_mii_probe:
|
||||
mdiobus_unregister(lp->mii_bus);
|
||||
out_err_mdiobus_register:
|
||||
mdiobus_free(lp->mii_bus);
|
||||
out_err_mdiobus_alloc:
|
||||
peripheral_free_list(pin_req);
|
||||
out_err_setup_pin_mux:
|
||||
out_err_probe_mac:
|
||||
platform_set_drvdata(pdev, NULL);
|
||||
free_netdev(ndev);
|
||||
|
@ -1150,8 +1076,7 @@ static int __devexit bfin_mac_remove(struct platform_device *pdev)
|
|||
|
||||
platform_set_drvdata(pdev, NULL);
|
||||
|
||||
mdiobus_unregister(lp->mii_bus);
|
||||
mdiobus_free(lp->mii_bus);
|
||||
lp->mii_bus->priv = NULL;
|
||||
|
||||
unregister_netdev(ndev);
|
||||
|
||||
|
@ -1189,6 +1114,74 @@ static int bfin_mac_resume(struct platform_device *pdev)
|
|||
#define bfin_mac_resume NULL
|
||||
#endif /* CONFIG_PM */
|
||||
|
||||
static int __devinit bfin_mii_bus_probe(struct platform_device *pdev)
|
||||
{
|
||||
struct mii_bus *miibus;
|
||||
int rc, i;
|
||||
|
||||
/*
|
||||
* We are setting up a network card,
|
||||
* so set the GPIO pins to Ethernet mode
|
||||
*/
|
||||
rc = peripheral_request_list(pin_req, DRV_NAME);
|
||||
if (rc) {
|
||||
dev_err(&pdev->dev, "Requesting peripherals failed!\n");
|
||||
return rc;
|
||||
}
|
||||
|
||||
rc = -ENOMEM;
|
||||
miibus = mdiobus_alloc();
|
||||
if (miibus == NULL)
|
||||
goto out_err_alloc;
|
||||
miibus->read = bfin_mdiobus_read;
|
||||
miibus->write = bfin_mdiobus_write;
|
||||
miibus->reset = bfin_mdiobus_reset;
|
||||
|
||||
miibus->parent = &pdev->dev;
|
||||
miibus->name = "bfin_mii_bus";
|
||||
snprintf(miibus->id, MII_BUS_ID_SIZE, "0");
|
||||
miibus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
|
||||
if (miibus->irq == NULL)
|
||||
goto out_err_alloc;
|
||||
for (i = 0; i < PHY_MAX_ADDR; ++i)
|
||||
miibus->irq[i] = PHY_POLL;
|
||||
|
||||
rc = mdiobus_register(miibus);
|
||||
if (rc) {
|
||||
dev_err(&pdev->dev, "Cannot register MDIO bus!\n");
|
||||
goto out_err_mdiobus_register;
|
||||
}
|
||||
|
||||
platform_set_drvdata(pdev, miibus);
|
||||
return 0;
|
||||
|
||||
out_err_mdiobus_register:
|
||||
mdiobus_free(miibus);
|
||||
out_err_alloc:
|
||||
peripheral_free_list(pin_req);
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
static int __devexit bfin_mii_bus_remove(struct platform_device *pdev)
|
||||
{
|
||||
struct mii_bus *miibus = platform_get_drvdata(pdev);
|
||||
platform_set_drvdata(pdev, NULL);
|
||||
mdiobus_unregister(miibus);
|
||||
mdiobus_free(miibus);
|
||||
peripheral_free_list(pin_req);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct platform_driver bfin_mii_bus_driver = {
|
||||
.probe = bfin_mii_bus_probe,
|
||||
.remove = __devexit_p(bfin_mii_bus_remove),
|
||||
.driver = {
|
||||
.name = "bfin_mii_bus",
|
||||
.owner = THIS_MODULE,
|
||||
},
|
||||
};
|
||||
|
||||
static struct platform_driver bfin_mac_driver = {
|
||||
.probe = bfin_mac_probe,
|
||||
.remove = __devexit_p(bfin_mac_remove),
|
||||
|
@ -1202,7 +1195,11 @@ static struct platform_driver bfin_mac_driver = {
|
|||
|
||||
static int __init bfin_mac_init(void)
|
||||
{
|
||||
return platform_driver_register(&bfin_mac_driver);
|
||||
int ret;
|
||||
ret = platform_driver_register(&bfin_mii_bus_driver);
|
||||
if (!ret)
|
||||
return platform_driver_register(&bfin_mac_driver);
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
module_init(bfin_mac_init);
|
||||
|
@ -1210,6 +1207,7 @@ module_init(bfin_mac_init);
|
|||
static void __exit bfin_mac_cleanup(void)
|
||||
{
|
||||
platform_driver_unregister(&bfin_mac_driver);
|
||||
platform_driver_unregister(&bfin_mii_bus_driver);
|
||||
}
|
||||
|
||||
module_exit(bfin_mac_cleanup);
|
||||
|
|
|
@ -48,6 +48,7 @@
|
|||
#include <linux/cache.h>
|
||||
#include <linux/firmware.h>
|
||||
#include <linux/log2.h>
|
||||
#include <linux/list.h>
|
||||
|
||||
#include "bnx2.h"
|
||||
#include "bnx2_fw.h"
|
||||
|
@ -545,8 +546,7 @@ bnx2_free_rx_mem(struct bnx2 *bp)
|
|||
rxr->rx_desc_mapping[j]);
|
||||
rxr->rx_desc_ring[j] = NULL;
|
||||
}
|
||||
if (rxr->rx_buf_ring)
|
||||
vfree(rxr->rx_buf_ring);
|
||||
vfree(rxr->rx_buf_ring);
|
||||
rxr->rx_buf_ring = NULL;
|
||||
|
||||
for (j = 0; j < bp->rx_max_pg_ring; j++) {
|
||||
|
@ -556,8 +556,7 @@ bnx2_free_rx_mem(struct bnx2 *bp)
|
|||
rxr->rx_pg_desc_mapping[j]);
|
||||
rxr->rx_pg_desc_ring[j] = NULL;
|
||||
}
|
||||
if (rxr->rx_pg_ring)
|
||||
vfree(rxr->rx_pg_ring);
|
||||
vfree(rxr->rx_pg_ring);
|
||||
rxr->rx_pg_ring = NULL;
|
||||
}
|
||||
}
|
||||
|
@ -3310,7 +3309,7 @@ bnx2_set_rx_mode(struct net_device *dev)
|
|||
{
|
||||
struct bnx2 *bp = netdev_priv(dev);
|
||||
u32 rx_mode, sort_mode;
|
||||
struct dev_addr_list *uc_ptr;
|
||||
struct netdev_hw_addr *ha;
|
||||
int i;
|
||||
|
||||
if (!netif_running(dev))
|
||||
|
@ -3369,21 +3368,19 @@ bnx2_set_rx_mode(struct net_device *dev)
|
|||
sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
|
||||
}
|
||||
|
||||
uc_ptr = NULL;
|
||||
if (dev->uc_count > BNX2_MAX_UNICAST_ADDRESSES) {
|
||||
rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
|
||||
sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
|
||||
BNX2_RPM_SORT_USER0_PROM_VLAN;
|
||||
} else if (!(dev->flags & IFF_PROMISC)) {
|
||||
uc_ptr = dev->uc_list;
|
||||
|
||||
/* Add all entries into the match filter list */
|
||||
for (i = 0; i < dev->uc_count; i++) {
|
||||
bnx2_set_mac_addr(bp, uc_ptr->da_addr,
|
||||
i = 0;
|
||||
list_for_each_entry(ha, &dev->uc_list, list) {
|
||||
bnx2_set_mac_addr(bp, ha->addr,
|
||||
i + BNX2_START_UNICAST_ADDRESS_INDEX);
|
||||
sort_mode |= (1 <<
|
||||
(i + BNX2_START_UNICAST_ADDRESS_INDEX));
|
||||
uc_ptr = uc_ptr->next;
|
||||
i++;
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -5488,7 +5485,7 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
|
|||
dev_kfree_skb(skb);
|
||||
return -EIO;
|
||||
}
|
||||
map = skb_shinfo(skb)->dma_maps[0];
|
||||
map = skb_shinfo(skb)->dma_head;
|
||||
|
||||
REG_WR(bp, BNX2_HC_COMMAND,
|
||||
bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
|
||||
|
@ -6168,7 +6165,7 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
|||
}
|
||||
|
||||
sp = skb_shinfo(skb);
|
||||
mapping = sp->dma_maps[0];
|
||||
mapping = sp->dma_head;
|
||||
|
||||
tx_buf = &txr->tx_buf_ring[ring_prod];
|
||||
tx_buf->skb = skb;
|
||||
|
@ -6192,7 +6189,7 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
|||
txbd = &txr->tx_desc_ring[ring_prod];
|
||||
|
||||
len = frag->size;
|
||||
mapping = sp->dma_maps[i + 1];
|
||||
mapping = sp->dma_maps[i];
|
||||
|
||||
txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
|
||||
txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
|
||||
|
@ -6211,7 +6208,6 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
|||
mmiowb();
|
||||
|
||||
txr->tx_prod = prod;
|
||||
dev->trans_start = jiffies;
|
||||
|
||||
if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
|
||||
netif_tx_stop_queue(txq);
|
||||
|
|
|
@ -10617,7 +10617,6 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
|||
mmiowb();
|
||||
|
||||
fp->tx_bd_prod += nbd;
|
||||
dev->trans_start = jiffies;
|
||||
|
||||
if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
|
||||
/* We want bnx2x_tx_int to "see" the updated tx_bd_prod
|
||||
|
|
|
@ -2405,8 +2405,7 @@ static void bond_miimon_commit(struct bonding *bond)
|
|||
bond_3ad_handle_link_change(slave,
|
||||
BOND_LINK_DOWN);
|
||||
|
||||
if (bond->params.mode == BOND_MODE_TLB ||
|
||||
bond->params.mode == BOND_MODE_ALB)
|
||||
if (bond_is_lb(bond))
|
||||
bond_alb_handle_link_change(bond, slave,
|
||||
BOND_LINK_DOWN);
|
||||
|
||||
|
|
|
@ -1541,6 +1541,7 @@ int bond_create_sysfs(void)
|
|||
printk(KERN_ERR
|
||||
"network device named %s already exists in sysfs",
|
||||
class_attr_bonding_masters.attr.name);
|
||||
ret = 0;
|
||||
}
|
||||
|
||||
return ret;
|
||||
|
|
|
@ -286,8 +286,7 @@ static inline unsigned long slave_last_rx(struct bonding *bond,
|
|||
static inline void bond_set_slave_inactive_flags(struct slave *slave)
|
||||
{
|
||||
struct bonding *bond = netdev_priv(slave->dev->master);
|
||||
if (bond->params.mode != BOND_MODE_TLB &&
|
||||
bond->params.mode != BOND_MODE_ALB)
|
||||
if (!bond_is_lb(bond))
|
||||
slave->state = BOND_STATE_BACKUP;
|
||||
slave->dev->priv_flags |= IFF_SLAVE_INACTIVE;
|
||||
if (slave_do_arp_validate(bond, slave))
|
||||
|
|
|
@ -51,6 +51,15 @@ config CAN_SJA1000_PLATFORM
|
|||
boards from Phytec (http://www.phytec.de) like the PCM027,
|
||||
PCM038.
|
||||
|
||||
config CAN_SJA1000_OF_PLATFORM
|
||||
depends on CAN_SJA1000 && PPC_OF
|
||||
tristate "Generic OF Platform Bus based SJA1000 driver"
|
||||
---help---
|
||||
This driver adds support for the SJA1000 chips connected to
|
||||
the OpenFirmware "platform bus" found on embedded systems with
|
||||
OpenFirmware bindings, e.g. if you have a PowerPC based system
|
||||
you may want to enable this option.
|
||||
|
||||
config CAN_EMS_PCI
|
||||
tristate "EMS CPC-PCI and CPC-PCIe Card"
|
||||
depends on PCI && CAN_SJA1000
|
||||
|
|
|
@ -477,7 +477,7 @@ int open_candev(struct net_device *dev)
|
|||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(open_candev);
|
||||
EXPORT_SYMBOL_GPL(open_candev);
|
||||
|
||||
/*
|
||||
* Common close function for cleanup before the device gets closed.
|
||||
|
|
|
@ -4,6 +4,7 @@
|
|||
|
||||
obj-$(CONFIG_CAN_SJA1000) += sja1000.o
|
||||
obj-$(CONFIG_CAN_SJA1000_PLATFORM) += sja1000_platform.o
|
||||
obj-$(CONFIG_CAN_SJA1000_OF_PLATFORM) += sja1000_of_platform.o
|
||||
obj-$(CONFIG_CAN_EMS_PCI) += ems_pci.o
|
||||
obj-$(CONFIG_CAN_KVASER_PCI) += kvaser_pci.o
|
||||
|
||||
|
|
|
@ -99,25 +99,21 @@ MODULE_DEVICE_TABLE(pci, ems_pci_tbl);
|
|||
*/
|
||||
static u8 ems_pci_readb(struct ems_pci_card *card, unsigned int port)
|
||||
{
|
||||
return readb((void __iomem *)card->base_addr
|
||||
+ (port * EMS_PCI_PORT_BYTES));
|
||||
return readb(card->base_addr + (port * EMS_PCI_PORT_BYTES));
|
||||
}
|
||||
|
||||
static u8 ems_pci_read_reg(const struct net_device *dev, int port)
|
||||
static u8 ems_pci_read_reg(const struct sja1000_priv *priv, int port)
|
||||
{
|
||||
return readb((void __iomem *)dev->base_addr
|
||||
+ (port * EMS_PCI_PORT_BYTES));
|
||||
return readb(priv->reg_base + (port * EMS_PCI_PORT_BYTES));
|
||||
}
|
||||
|
||||
static void ems_pci_write_reg(const struct net_device *dev, int port, u8 val)
|
||||
static void ems_pci_write_reg(const struct sja1000_priv *priv, int port, u8 val)
|
||||
{
|
||||
writeb(val, (void __iomem *)dev->base_addr
|
||||
+ (port * EMS_PCI_PORT_BYTES));
|
||||
writeb(val, priv->reg_base + (port * EMS_PCI_PORT_BYTES));
|
||||
}
|
||||
|
||||
static void ems_pci_post_irq(const struct net_device *dev)
|
||||
static void ems_pci_post_irq(const struct sja1000_priv *priv)
|
||||
{
|
||||
struct sja1000_priv *priv = netdev_priv(dev);
|
||||
struct ems_pci_card *card = (struct ems_pci_card *)priv->priv;
|
||||
|
||||
/* reset int flag of pita */
|
||||
|
@ -129,17 +125,17 @@ static void ems_pci_post_irq(const struct net_device *dev)
|
|||
* Check if a CAN controller is present at the specified location
|
||||
* by trying to set them into PeliCAN mode
|
||||
*/
|
||||
static inline int ems_pci_check_chan(struct net_device *dev)
|
||||
static inline int ems_pci_check_chan(const struct sja1000_priv *priv)
|
||||
{
|
||||
unsigned char res;
|
||||
|
||||
/* Make sure SJA1000 is in reset mode */
|
||||
ems_pci_write_reg(dev, REG_MOD, 1);
|
||||
ems_pci_write_reg(priv, REG_MOD, 1);
|
||||
|
||||
ems_pci_write_reg(dev, REG_CDR, CDR_PELICAN);
|
||||
ems_pci_write_reg(priv, REG_CDR, CDR_PELICAN);
|
||||
|
||||
/* read reset-values */
|
||||
res = ems_pci_read_reg(dev, REG_CDR);
|
||||
res = ems_pci_read_reg(priv, REG_CDR);
|
||||
|
||||
if (res == CDR_PELICAN)
|
||||
return 1;
|
||||
|
@ -218,14 +214,12 @@ static int __devinit ems_pci_add_card(struct pci_dev *pdev,
|
|||
card->conf_addr = pci_iomap(pdev, 0, EMS_PCI_MEM_SIZE);
|
||||
if (card->conf_addr == NULL) {
|
||||
err = -ENOMEM;
|
||||
|
||||
goto failure_cleanup;
|
||||
}
|
||||
|
||||
card->base_addr = pci_iomap(pdev, 1, EMS_PCI_MEM_SIZE);
|
||||
if (card->base_addr == NULL) {
|
||||
err = -ENOMEM;
|
||||
|
||||
goto failure_cleanup;
|
||||
}
|
||||
|
||||
|
@ -239,7 +233,6 @@ static int __devinit ems_pci_add_card(struct pci_dev *pdev,
|
|||
ems_pci_readb(card, 3) != 0xCB ||
|
||||
ems_pci_readb(card, 4) != 0x11) {
|
||||
dev_err(&pdev->dev, "Not EMS Dr. Thomas Wuensche interface\n");
|
||||
|
||||
err = -ENODEV;
|
||||
goto failure_cleanup;
|
||||
}
|
||||
|
@ -260,12 +253,11 @@ static int __devinit ems_pci_add_card(struct pci_dev *pdev,
|
|||
priv->irq_flags = IRQF_SHARED;
|
||||
|
||||
dev->irq = pdev->irq;
|
||||
dev->base_addr = (unsigned long)(card->base_addr
|
||||
+ EMS_PCI_CAN_BASE_OFFSET
|
||||
+ (i * EMS_PCI_CAN_CTRL_SIZE));
|
||||
priv->reg_base = card->base_addr + EMS_PCI_CAN_BASE_OFFSET
|
||||
+ (i * EMS_PCI_CAN_CTRL_SIZE);
|
||||
|
||||
/* Check if channel is present */
|
||||
if (ems_pci_check_chan(dev)) {
|
||||
if (ems_pci_check_chan(priv)) {
|
||||
priv->read_reg = ems_pci_read_reg;
|
||||
priv->write_reg = ems_pci_write_reg;
|
||||
priv->post_irq = ems_pci_post_irq;
|
||||
|
@ -289,9 +281,8 @@ static int __devinit ems_pci_add_card(struct pci_dev *pdev,
|
|||
|
||||
card->channels++;
|
||||
|
||||
dev_info(&pdev->dev, "Channel #%d at %#lX, irq %d\n",
|
||||
i + 1, dev->base_addr,
|
||||
dev->irq);
|
||||
dev_info(&pdev->dev, "Channel #%d at 0x%p, irq %d\n",
|
||||
i + 1, priv->reg_base, dev->irq);
|
||||
} else {
|
||||
free_sja1000dev(dev);
|
||||
}
|
||||
|
|
|
@ -117,14 +117,15 @@ static struct pci_device_id kvaser_pci_tbl[] = {
|
|||
|
||||
MODULE_DEVICE_TABLE(pci, kvaser_pci_tbl);
|
||||
|
||||
static u8 kvaser_pci_read_reg(const struct net_device *dev, int port)
|
||||
static u8 kvaser_pci_read_reg(const struct sja1000_priv *priv, int port)
|
||||
{
|
||||
return ioread8((void __iomem *)(dev->base_addr + port));
|
||||
return ioread8(priv->reg_base + port);
|
||||
}
|
||||
|
||||
static void kvaser_pci_write_reg(const struct net_device *dev, int port, u8 val)
|
||||
static void kvaser_pci_write_reg(const struct sja1000_priv *priv,
|
||||
int port, u8 val)
|
||||
{
|
||||
iowrite8(val, (void __iomem *)(dev->base_addr + port));
|
||||
iowrite8(val, priv->reg_base + port);
|
||||
}
|
||||
|
||||
static void kvaser_pci_disable_irq(struct net_device *dev)
|
||||
|
@ -199,7 +200,7 @@ static void kvaser_pci_del_chan(struct net_device *dev)
|
|||
}
|
||||
unregister_sja1000dev(dev);
|
||||
|
||||
pci_iounmap(board->pci_dev, (void __iomem *)dev->base_addr);
|
||||
pci_iounmap(board->pci_dev, priv->reg_base);
|
||||
pci_iounmap(board->pci_dev, board->conf_addr);
|
||||
pci_iounmap(board->pci_dev, board->res_addr);
|
||||
|
||||
|
@ -210,7 +211,7 @@ static int kvaser_pci_add_chan(struct pci_dev *pdev, int channel,
|
|||
struct net_device **master_dev,
|
||||
void __iomem *conf_addr,
|
||||
void __iomem *res_addr,
|
||||
unsigned long base_addr)
|
||||
void __iomem *base_addr)
|
||||
{
|
||||
struct net_device *dev;
|
||||
struct sja1000_priv *priv;
|
||||
|
@ -252,7 +253,7 @@ static int kvaser_pci_add_chan(struct pci_dev *pdev, int channel,
|
|||
board->xilinx_ver = master_board->xilinx_ver;
|
||||
}
|
||||
|
||||
dev->base_addr = base_addr + channel * KVASER_PCI_PORT_BYTES;
|
||||
priv->reg_base = base_addr + channel * KVASER_PCI_PORT_BYTES;
|
||||
|
||||
priv->read_reg = kvaser_pci_read_reg;
|
||||
priv->write_reg = kvaser_pci_write_reg;
|
||||
|
@ -267,8 +268,8 @@ static int kvaser_pci_add_chan(struct pci_dev *pdev, int channel,
|
|||
|
||||
init_step = 4;
|
||||
|
||||
dev_info(&pdev->dev, "base_addr=%#lx conf_addr=%p irq=%d\n",
|
||||
dev->base_addr, board->conf_addr, dev->irq);
|
||||
dev_info(&pdev->dev, "reg_base=%p conf_addr=%p irq=%d\n",
|
||||
priv->reg_base, board->conf_addr, dev->irq);
|
||||
|
||||
SET_NETDEV_DEV(dev, &pdev->dev);
|
||||
|
||||
|
@ -343,7 +344,7 @@ static int __devinit kvaser_pci_init_one(struct pci_dev *pdev,
|
|||
for (i = 0; i < no_channels; i++) {
|
||||
err = kvaser_pci_add_chan(pdev, i, &master_dev,
|
||||
conf_addr, res_addr,
|
||||
(unsigned long)base_addr);
|
||||
base_addr);
|
||||
if (err)
|
||||
goto failure_cleanup;
|
||||
}
|
||||
|
|
|
@ -89,7 +89,7 @@ static int sja1000_probe_chip(struct net_device *dev)
|
|||
{
|
||||
struct sja1000_priv *priv = netdev_priv(dev);
|
||||
|
||||
if (dev->base_addr && (priv->read_reg(dev, 0) == 0xFF)) {
|
||||
if (priv->reg_base && (priv->read_reg(priv, 0) == 0xFF)) {
|
||||
printk(KERN_INFO "%s: probing @0x%lX failed\n",
|
||||
DRV_NAME, dev->base_addr);
|
||||
return 0;
|
||||
|
@ -100,11 +100,11 @@ static int sja1000_probe_chip(struct net_device *dev)
|
|||
static void set_reset_mode(struct net_device *dev)
|
||||
{
|
||||
struct sja1000_priv *priv = netdev_priv(dev);
|
||||
unsigned char status = priv->read_reg(dev, REG_MOD);
|
||||
unsigned char status = priv->read_reg(priv, REG_MOD);
|
||||
int i;
|
||||
|
||||
/* disable interrupts */
|
||||
priv->write_reg(dev, REG_IER, IRQ_OFF);
|
||||
priv->write_reg(priv, REG_IER, IRQ_OFF);
|
||||
|
||||
for (i = 0; i < 100; i++) {
|
||||
/* check reset bit */
|
||||
|
@ -113,9 +113,9 @@ static void set_reset_mode(struct net_device *dev)
|
|||
return;
|
||||
}
|
||||
|
||||
priv->write_reg(dev, REG_MOD, MOD_RM); /* reset chip */
|
||||
priv->write_reg(priv, REG_MOD, MOD_RM); /* reset chip */
|
||||
udelay(10);
|
||||
status = priv->read_reg(dev, REG_MOD);
|
||||
status = priv->read_reg(priv, REG_MOD);
|
||||
}
|
||||
|
||||
dev_err(dev->dev.parent, "setting SJA1000 into reset mode failed!\n");
|
||||
|
@ -124,7 +124,7 @@ static void set_reset_mode(struct net_device *dev)
|
|||
static void set_normal_mode(struct net_device *dev)
|
||||
{
|
||||
struct sja1000_priv *priv = netdev_priv(dev);
|
||||
unsigned char status = priv->read_reg(dev, REG_MOD);
|
||||
unsigned char status = priv->read_reg(priv, REG_MOD);
|
||||
int i;
|
||||
|
||||
for (i = 0; i < 100; i++) {
|
||||
|
@ -132,14 +132,14 @@ static void set_normal_mode(struct net_device *dev)
|
|||
if ((status & MOD_RM) == 0) {
|
||||
priv->can.state = CAN_STATE_ERROR_ACTIVE;
|
||||
/* enable all interrupts */
|
||||
priv->write_reg(dev, REG_IER, IRQ_ALL);
|
||||
priv->write_reg(priv, REG_IER, IRQ_ALL);
|
||||
return;
|
||||
}
|
||||
|
||||
/* set chip to normal mode */
|
||||
priv->write_reg(dev, REG_MOD, 0x00);
|
||||
priv->write_reg(priv, REG_MOD, 0x00);
|
||||
udelay(10);
|
||||
status = priv->read_reg(dev, REG_MOD);
|
||||
status = priv->read_reg(priv, REG_MOD);
|
||||
}
|
||||
|
||||
dev_err(dev->dev.parent, "setting SJA1000 into normal mode failed!\n");
|
||||
|
@ -154,9 +154,9 @@ static void sja1000_start(struct net_device *dev)
|
|||
set_reset_mode(dev);
|
||||
|
||||
/* Clear error counters and error code capture */
|
||||
priv->write_reg(dev, REG_TXERR, 0x0);
|
||||
priv->write_reg(dev, REG_RXERR, 0x0);
|
||||
priv->read_reg(dev, REG_ECC);
|
||||
priv->write_reg(priv, REG_TXERR, 0x0);
|
||||
priv->write_reg(priv, REG_RXERR, 0x0);
|
||||
priv->read_reg(priv, REG_ECC);
|
||||
|
||||
/* leave reset mode */
|
||||
set_normal_mode(dev);
|
||||
|
@ -198,8 +198,8 @@ static int sja1000_set_bittiming(struct net_device *dev)
|
|||
dev_info(dev->dev.parent,
|
||||
"setting BTR0=0x%02x BTR1=0x%02x\n", btr0, btr1);
|
||||
|
||||
priv->write_reg(dev, REG_BTR0, btr0);
|
||||
priv->write_reg(dev, REG_BTR1, btr1);
|
||||
priv->write_reg(priv, REG_BTR0, btr0);
|
||||
priv->write_reg(priv, REG_BTR1, btr1);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -217,20 +217,20 @@ static void chipset_init(struct net_device *dev)
|
|||
struct sja1000_priv *priv = netdev_priv(dev);
|
||||
|
||||
/* set clock divider and output control register */
|
||||
priv->write_reg(dev, REG_CDR, priv->cdr | CDR_PELICAN);
|
||||
priv->write_reg(priv, REG_CDR, priv->cdr | CDR_PELICAN);
|
||||
|
||||
/* set acceptance filter (accept all) */
|
||||
priv->write_reg(dev, REG_ACCC0, 0x00);
|
||||
priv->write_reg(dev, REG_ACCC1, 0x00);
|
||||
priv->write_reg(dev, REG_ACCC2, 0x00);
|
||||
priv->write_reg(dev, REG_ACCC3, 0x00);
|
||||
priv->write_reg(priv, REG_ACCC0, 0x00);
|
||||
priv->write_reg(priv, REG_ACCC1, 0x00);
|
||||
priv->write_reg(priv, REG_ACCC2, 0x00);
|
||||
priv->write_reg(priv, REG_ACCC3, 0x00);
|
||||
|
||||
priv->write_reg(dev, REG_ACCM0, 0xFF);
|
||||
priv->write_reg(dev, REG_ACCM1, 0xFF);
|
||||
priv->write_reg(dev, REG_ACCM2, 0xFF);
|
||||
priv->write_reg(dev, REG_ACCM3, 0xFF);
|
||||
priv->write_reg(priv, REG_ACCM0, 0xFF);
|
||||
priv->write_reg(priv, REG_ACCM1, 0xFF);
|
||||
priv->write_reg(priv, REG_ACCM2, 0xFF);
|
||||
priv->write_reg(priv, REG_ACCM3, 0xFF);
|
||||
|
||||
priv->write_reg(dev, REG_OCR, priv->ocr | OCR_MODE_NORMAL);
|
||||
priv->write_reg(priv, REG_OCR, priv->ocr | OCR_MODE_NORMAL);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -261,27 +261,27 @@ static int sja1000_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
|||
if (id & CAN_EFF_FLAG) {
|
||||
fi |= FI_FF;
|
||||
dreg = EFF_BUF;
|
||||
priv->write_reg(dev, REG_FI, fi);
|
||||
priv->write_reg(dev, REG_ID1, (id & 0x1fe00000) >> (5 + 16));
|
||||
priv->write_reg(dev, REG_ID2, (id & 0x001fe000) >> (5 + 8));
|
||||
priv->write_reg(dev, REG_ID3, (id & 0x00001fe0) >> 5);
|
||||
priv->write_reg(dev, REG_ID4, (id & 0x0000001f) << 3);
|
||||
priv->write_reg(priv, REG_FI, fi);
|
||||
priv->write_reg(priv, REG_ID1, (id & 0x1fe00000) >> (5 + 16));
|
||||
priv->write_reg(priv, REG_ID2, (id & 0x001fe000) >> (5 + 8));
|
||||
priv->write_reg(priv, REG_ID3, (id & 0x00001fe0) >> 5);
|
||||
priv->write_reg(priv, REG_ID4, (id & 0x0000001f) << 3);
|
||||
} else {
|
||||
dreg = SFF_BUF;
|
||||
priv->write_reg(dev, REG_FI, fi);
|
||||
priv->write_reg(dev, REG_ID1, (id & 0x000007f8) >> 3);
|
||||
priv->write_reg(dev, REG_ID2, (id & 0x00000007) << 5);
|
||||
priv->write_reg(priv, REG_FI, fi);
|
||||
priv->write_reg(priv, REG_ID1, (id & 0x000007f8) >> 3);
|
||||
priv->write_reg(priv, REG_ID2, (id & 0x00000007) << 5);
|
||||
}
|
||||
|
||||
for (i = 0; i < dlc; i++)
|
||||
priv->write_reg(dev, dreg++, cf->data[i]);
|
||||
priv->write_reg(priv, dreg++, cf->data[i]);
|
||||
|
||||
stats->tx_bytes += dlc;
|
||||
dev->trans_start = jiffies;
|
||||
|
||||
can_put_echo_skb(skb, dev, 0);
|
||||
|
||||
priv->write_reg(dev, REG_CMR, CMD_TR);
|
||||
priv->write_reg(priv, REG_CMR, CMD_TR);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -304,22 +304,22 @@ static void sja1000_rx(struct net_device *dev)
|
|||
skb->dev = dev;
|
||||
skb->protocol = htons(ETH_P_CAN);
|
||||
|
||||
fi = priv->read_reg(dev, REG_FI);
|
||||
fi = priv->read_reg(priv, REG_FI);
|
||||
dlc = fi & 0x0F;
|
||||
|
||||
if (fi & FI_FF) {
|
||||
/* extended frame format (EFF) */
|
||||
dreg = EFF_BUF;
|
||||
id = (priv->read_reg(dev, REG_ID1) << (5 + 16))
|
||||
| (priv->read_reg(dev, REG_ID2) << (5 + 8))
|
||||
| (priv->read_reg(dev, REG_ID3) << 5)
|
||||
| (priv->read_reg(dev, REG_ID4) >> 3);
|
||||
id = (priv->read_reg(priv, REG_ID1) << (5 + 16))
|
||||
| (priv->read_reg(priv, REG_ID2) << (5 + 8))
|
||||
| (priv->read_reg(priv, REG_ID3) << 5)
|
||||
| (priv->read_reg(priv, REG_ID4) >> 3);
|
||||
id |= CAN_EFF_FLAG;
|
||||
} else {
|
||||
/* standard frame format (SFF) */
|
||||
dreg = SFF_BUF;
|
||||
id = (priv->read_reg(dev, REG_ID1) << 3)
|
||||
| (priv->read_reg(dev, REG_ID2) >> 5);
|
||||
id = (priv->read_reg(priv, REG_ID1) << 3)
|
||||
| (priv->read_reg(priv, REG_ID2) >> 5);
|
||||
}
|
||||
|
||||
if (fi & FI_RTR)
|
||||
|
@ -330,13 +330,13 @@ static void sja1000_rx(struct net_device *dev)
|
|||
cf->can_id = id;
|
||||
cf->can_dlc = dlc;
|
||||
for (i = 0; i < dlc; i++)
|
||||
cf->data[i] = priv->read_reg(dev, dreg++);
|
||||
cf->data[i] = priv->read_reg(priv, dreg++);
|
||||
|
||||
while (i < 8)
|
||||
cf->data[i++] = 0;
|
||||
|
||||
/* release receive buffer */
|
||||
priv->write_reg(dev, REG_CMR, CMD_RRB);
|
||||
priv->write_reg(priv, REG_CMR, CMD_RRB);
|
||||
|
||||
netif_rx(skb);
|
||||
|
||||
|
@ -371,7 +371,7 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
|
|||
cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
|
||||
stats->rx_over_errors++;
|
||||
stats->rx_errors++;
|
||||
priv->write_reg(dev, REG_CMR, CMD_CDO); /* clear bit */
|
||||
priv->write_reg(priv, REG_CMR, CMD_CDO); /* clear bit */
|
||||
}
|
||||
|
||||
if (isrc & IRQ_EI) {
|
||||
|
@ -392,7 +392,7 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
|
|||
priv->can.can_stats.bus_error++;
|
||||
stats->rx_errors++;
|
||||
|
||||
ecc = priv->read_reg(dev, REG_ECC);
|
||||
ecc = priv->read_reg(priv, REG_ECC);
|
||||
|
||||
cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
|
||||
|
||||
|
@ -426,7 +426,7 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
|
|||
if (isrc & IRQ_ALI) {
|
||||
/* arbitration lost interrupt */
|
||||
dev_dbg(dev->dev.parent, "arbitration lost interrupt\n");
|
||||
alc = priv->read_reg(dev, REG_ALC);
|
||||
alc = priv->read_reg(priv, REG_ALC);
|
||||
priv->can.can_stats.arbitration_lost++;
|
||||
stats->rx_errors++;
|
||||
cf->can_id |= CAN_ERR_LOSTARB;
|
||||
|
@ -435,8 +435,8 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
|
|||
|
||||
if (state != priv->can.state && (state == CAN_STATE_ERROR_WARNING ||
|
||||
state == CAN_STATE_ERROR_PASSIVE)) {
|
||||
uint8_t rxerr = priv->read_reg(dev, REG_RXERR);
|
||||
uint8_t txerr = priv->read_reg(dev, REG_TXERR);
|
||||
uint8_t rxerr = priv->read_reg(priv, REG_RXERR);
|
||||
uint8_t txerr = priv->read_reg(priv, REG_TXERR);
|
||||
cf->can_id |= CAN_ERR_CRTL;
|
||||
if (state == CAN_STATE_ERROR_WARNING) {
|
||||
priv->can.can_stats.error_warning++;
|
||||
|
@ -471,15 +471,15 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
|
|||
int n = 0;
|
||||
|
||||
/* Shared interrupts and IRQ off? */
|
||||
if (priv->read_reg(dev, REG_IER) == IRQ_OFF)
|
||||
if (priv->read_reg(priv, REG_IER) == IRQ_OFF)
|
||||
return IRQ_NONE;
|
||||
|
||||
if (priv->pre_irq)
|
||||
priv->pre_irq(dev);
|
||||
priv->pre_irq(priv);
|
||||
|
||||
while ((isrc = priv->read_reg(dev, REG_IR)) && (n < SJA1000_MAX_IRQ)) {
|
||||
while ((isrc = priv->read_reg(priv, REG_IR)) && (n < SJA1000_MAX_IRQ)) {
|
||||
n++;
|
||||
status = priv->read_reg(dev, REG_SR);
|
||||
status = priv->read_reg(priv, REG_SR);
|
||||
|
||||
if (isrc & IRQ_WUI)
|
||||
dev_warn(dev->dev.parent, "wakeup interrupt\n");
|
||||
|
@ -494,7 +494,7 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
|
|||
/* receive interrupt */
|
||||
while (status & SR_RBS) {
|
||||
sja1000_rx(dev);
|
||||
status = priv->read_reg(dev, REG_SR);
|
||||
status = priv->read_reg(priv, REG_SR);
|
||||
}
|
||||
}
|
||||
if (isrc & (IRQ_DOI | IRQ_EI | IRQ_BEI | IRQ_EPI | IRQ_ALI)) {
|
||||
|
@ -505,7 +505,7 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
|
|||
}
|
||||
|
||||
if (priv->post_irq)
|
||||
priv->post_irq(dev);
|
||||
priv->post_irq(priv);
|
||||
|
||||
if (n >= SJA1000_MAX_IRQ)
|
||||
dev_dbg(dev->dev.parent, "%d messages handled in ISR", n);
|
||||
|
@ -532,8 +532,8 @@ static int sja1000_open(struct net_device *dev)
|
|||
err = request_irq(dev->irq, &sja1000_interrupt, priv->irq_flags,
|
||||
dev->name, (void *)dev);
|
||||
if (err) {
|
||||
return -EAGAIN;
|
||||
close_candev(dev);
|
||||
return -EAGAIN;
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -155,14 +155,15 @@ struct sja1000_priv {
|
|||
struct sk_buff *echo_skb;
|
||||
|
||||
/* the lower-layer is responsible for appropriate locking */
|
||||
u8 (*read_reg) (const struct net_device *dev, int reg);
|
||||
void (*write_reg) (const struct net_device *dev, int reg, u8 val);
|
||||
void (*pre_irq) (const struct net_device *dev);
|
||||
void (*post_irq) (const struct net_device *dev);
|
||||
u8 (*read_reg) (const struct sja1000_priv *priv, int reg);
|
||||
void (*write_reg) (const struct sja1000_priv *priv, int reg, u8 val);
|
||||
void (*pre_irq) (const struct sja1000_priv *priv);
|
||||
void (*post_irq) (const struct sja1000_priv *priv);
|
||||
|
||||
void *priv; /* for board-specific data */
|
||||
struct net_device *dev;
|
||||
|
||||
void __iomem *reg_base; /* ioremap'ed address to registers */
|
||||
unsigned long irq_flags; /* for request_irq() */
|
||||
|
||||
u16 flags; /* custom mode flags */
|
||||
|
|
235
drivers/net/can/sja1000/sja1000_of_platform.c
Normal file
|
@ -0,0 +1,235 @@
|
|||
/*
|
||||
* Driver for SJA1000 CAN controllers on the OpenFirmware platform bus
|
||||
*
|
||||
* Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the version 2 of the GNU General Public License
|
||||
* as published by the Free Software Foundation
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software Foundation,
|
||||
* Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
|
||||
*/
|
||||
|
||||
/* This is a generic driver for SJA1000 chips on the OpenFirmware platform
|
||||
* bus found on embedded PowerPC systems. You need a SJA1000 CAN node
|
||||
* definition in your flattened device tree source (DTS) file similar to:
|
||||
*
|
||||
* can@3,100 {
|
||||
* compatible = "nxp,sja1000";
|
||||
* reg = <3 0x100 0x80>;
|
||||
* interrupts = <2 0>;
|
||||
* interrupt-parent = <&mpic>;
|
||||
* nxp,external-clock-frequency = <16000000>;
|
||||
* };
|
||||
*
|
||||
* See "Documentation/powerpc/dts-bindings/can/sja1000.txt" for further
|
||||
* information.
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/netdevice.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/can.h>
|
||||
#include <linux/can/dev.h>
|
||||
|
||||
#include <linux/of_platform.h>
|
||||
#include <asm/prom.h>
|
||||
|
||||
#include "sja1000.h"
|
||||
|
||||
#define DRV_NAME "sja1000_of_platform"
|
||||
|
||||
MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
|
||||
MODULE_DESCRIPTION("Socket-CAN driver for SJA1000 on the OF platform bus");
|
||||
MODULE_LICENSE("GPL v2");
|
||||
|
||||
#define SJA1000_OFP_CAN_CLOCK (16000000 / 2)
|
||||
|
||||
#define SJA1000_OFP_OCR OCR_TX0_PULLDOWN
|
||||
#define SJA1000_OFP_CDR (CDR_CBP | CDR_CLK_OFF)
|
||||
|
||||
static u8 sja1000_ofp_read_reg(const struct sja1000_priv *priv, int reg)
|
||||
{
|
||||
return in_8(priv->reg_base + reg);
|
||||
}
|
||||
|
||||
static void sja1000_ofp_write_reg(const struct sja1000_priv *priv,
|
||||
int reg, u8 val)
|
||||
{
|
||||
out_8(priv->reg_base + reg, val);
|
||||
}
|
||||
|
||||
static int __devexit sja1000_ofp_remove(struct of_device *ofdev)
|
||||
{
|
||||
struct net_device *dev = dev_get_drvdata(&ofdev->dev);
|
||||
struct sja1000_priv *priv = netdev_priv(dev);
|
||||
struct device_node *np = ofdev->node;
|
||||
struct resource res;
|
||||
|
||||
dev_set_drvdata(&ofdev->dev, NULL);
|
||||
|
||||
unregister_sja1000dev(dev);
|
||||
free_sja1000dev(dev);
|
||||
iounmap(priv->reg_base);
|
||||
irq_dispose_mapping(dev->irq);
|
||||
|
||||
of_address_to_resource(np, 0, &res);
|
||||
release_mem_region(res.start, resource_size(&res));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int __devinit sja1000_ofp_probe(struct of_device *ofdev,
|
||||
const struct of_device_id *id)
|
||||
{
|
||||
struct device_node *np = ofdev->node;
|
||||
struct net_device *dev;
|
||||
struct sja1000_priv *priv;
|
||||
struct resource res;
|
||||
const u32 *prop;
|
||||
int err, irq, res_size, prop_size;
|
||||
void __iomem *base;
|
||||
|
||||
err = of_address_to_resource(np, 0, &res);
|
||||
if (err) {
|
||||
dev_err(&ofdev->dev, "invalid address\n");
|
||||
return err;
|
||||
}
|
||||
|
||||
res_size = resource_size(&res);
|
||||
|
||||
if (!request_mem_region(res.start, res_size, DRV_NAME)) {
|
||||
dev_err(&ofdev->dev, "couldn't request %#llx..%#llx\n",
|
||||
(unsigned long long)res.start,
|
||||
(unsigned long long)res.end);
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
base = ioremap_nocache(res.start, res_size);
|
||||
if (!base) {
|
||||
dev_err(&ofdev->dev, "couldn't ioremap %#llx..%#llx\n",
|
||||
(unsigned long long)res.start,
|
||||
(unsigned long long)res.end);
|
||||
err = -ENOMEM;
|
||||
goto exit_release_mem;
|
||||
}
|
||||
|
||||
irq = irq_of_parse_and_map(np, 0);
|
||||
if (irq == NO_IRQ) {
|
||||
dev_err(&ofdev->dev, "no irq found\n");
|
||||
err = -ENODEV;
|
||||
goto exit_unmap_mem;
|
||||
}
|
||||
|
||||
dev = alloc_sja1000dev(0);
|
||||
if (!dev) {
|
||||
err = -ENOMEM;
|
||||
goto exit_dispose_irq;
|
||||
}
|
||||
|
||||
priv = netdev_priv(dev);
|
||||
|
||||
priv->read_reg = sja1000_ofp_read_reg;
|
||||
priv->write_reg = sja1000_ofp_write_reg;
|
||||
|
||||
prop = of_get_property(np, "nxp,external-clock-frequency", &prop_size);
|
||||
if (prop && (prop_size == sizeof(u32)))
|
||||
priv->can.clock.freq = *prop / 2;
|
||||
else
|
||||
priv->can.clock.freq = SJA1000_OFP_CAN_CLOCK; /* default */
|
||||
|
||||
prop = of_get_property(np, "nxp,tx-output-mode", &prop_size);
|
||||
if (prop && (prop_size == sizeof(u32)))
|
||||
priv->ocr |= *prop & OCR_MODE_MASK;
|
||||
else
|
||||
priv->ocr |= OCR_MODE_NORMAL; /* default */
|
||||
|
||||
prop = of_get_property(np, "nxp,tx-output-config", &prop_size);
|
||||
if (prop && (prop_size == sizeof(u32)))
|
||||
priv->ocr |= (*prop << OCR_TX_SHIFT) & OCR_TX_MASK;
|
||||
else
|
||||
priv->ocr |= OCR_TX0_PULLDOWN; /* default */
|
||||
|
||||
prop = of_get_property(np, "nxp,clock-out-frequency", &prop_size);
|
||||
if (prop && (prop_size == sizeof(u32)) && *prop) {
|
||||
u32 divider = priv->can.clock.freq * 2 / *prop;
|
||||
|
||||
if (divider > 1)
|
||||
priv->cdr |= divider / 2 - 1;
|
||||
else
|
||||
priv->cdr |= CDR_CLKOUT_MASK;
|
||||
} else {
|
||||
priv->cdr |= CDR_CLK_OFF; /* default */
|
||||
}
|
||||
|
||||
prop = of_get_property(np, "nxp,no-comparator-bypass", NULL);
|
||||
if (!prop)
|
||||
priv->cdr |= CDR_CBP; /* default */
|
||||
|
||||
priv->irq_flags = IRQF_SHARED;
|
||||
priv->reg_base = base;
|
||||
|
||||
dev->irq = irq;
|
||||
|
||||
dev_info(&ofdev->dev,
|
||||
"reg_base=0x%p irq=%d clock=%d ocr=0x%02x cdr=0x%02x\n",
|
||||
priv->reg_base, dev->irq, priv->can.clock.freq,
|
||||
priv->ocr, priv->cdr);
|
||||
|
||||
dev_set_drvdata(&ofdev->dev, dev);
|
||||
SET_NETDEV_DEV(dev, &ofdev->dev);
|
||||
|
||||
err = register_sja1000dev(dev);
|
||||
if (err) {
|
||||
dev_err(&ofdev->dev, "registering %s failed (err=%d)\n",
|
||||
DRV_NAME, err);
|
||||
goto exit_free_sja1000;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
exit_free_sja1000:
|
||||
free_sja1000dev(dev);
|
||||
exit_dispose_irq:
|
||||
irq_dispose_mapping(irq);
|
||||
exit_unmap_mem:
|
||||
iounmap(base);
|
||||
exit_release_mem:
|
||||
release_mem_region(res.start, res_size);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static struct of_device_id __devinitdata sja1000_ofp_table[] = {
|
||||
{.compatible = "nxp,sja1000"},
|
||||
{},
|
||||
};
|
||||
|
||||
static struct of_platform_driver sja1000_ofp_driver = {
|
||||
.owner = THIS_MODULE,
|
||||
.name = DRV_NAME,
|
||||
.probe = sja1000_ofp_probe,
|
||||
.remove = __devexit_p(sja1000_ofp_remove),
|
||||
.match_table = sja1000_ofp_table,
|
||||
};
|
||||
|
||||
static int __init sja1000_ofp_init(void)
|
||||
{
|
||||
return of_register_platform_driver(&sja1000_ofp_driver);
|
||||
}
|
||||
module_init(sja1000_ofp_init);
|
||||
|
||||
static void __exit sja1000_ofp_exit(void)
|
||||
{
|
||||
return of_unregister_platform_driver(&sja1000_ofp_driver);
|
||||
};
|
||||
module_exit(sja1000_ofp_exit);
|
|
@ -37,14 +37,14 @@ MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
|
|||
MODULE_DESCRIPTION("Socket-CAN driver for SJA1000 on the platform bus");
|
||||
MODULE_LICENSE("GPL v2");
|
||||
|
||||
static u8 sp_read_reg(const struct net_device *dev, int reg)
|
||||
static u8 sp_read_reg(const struct sja1000_priv *priv, int reg)
|
||||
{
|
||||
return ioread8((void __iomem *)(dev->base_addr + reg));
|
||||
return ioread8(priv->reg_base + reg);
|
||||
}
|
||||
|
||||
static void sp_write_reg(const struct net_device *dev, int reg, u8 val)
|
||||
static void sp_write_reg(const struct sja1000_priv *priv, int reg, u8 val)
|
||||
{
|
||||
iowrite8(val, (void __iomem *)(dev->base_addr + reg));
|
||||
iowrite8(val, priv->reg_base + reg);
|
||||
}
|
||||
|
||||
static int sp_probe(struct platform_device *pdev)
|
||||
|
@ -89,9 +89,9 @@ static int sp_probe(struct platform_device *pdev)
|
|||
}
|
||||
priv = netdev_priv(dev);
|
||||
|
||||
dev->base_addr = (unsigned long)addr;
|
||||
dev->irq = res_irq->start;
|
||||
priv->irq_flags = res_irq->flags & IRQF_TRIGGER_MASK;
|
||||
priv->reg_base = addr;
|
||||
priv->read_reg = sp_read_reg;
|
||||
priv->write_reg = sp_write_reg;
|
||||
priv->can.clock.freq = pdata->clock;
|
||||
|
@ -108,8 +108,8 @@ static int sp_probe(struct platform_device *pdev)
|
|||
goto exit_free;
|
||||
}
|
||||
|
||||
dev_info(&pdev->dev, "%s device registered (base_addr=%#lx, irq=%d)\n",
|
||||
DRV_NAME, dev->base_addr, dev->irq);
|
||||
dev_info(&pdev->dev, "%s device registered (reg_base=%p, irq=%d)\n",
|
||||
DRV_NAME, priv->reg_base, dev->irq);
|
||||
return 0;
|
||||
|
||||
exit_free:
|
||||
|
@ -125,13 +125,14 @@ static int sp_probe(struct platform_device *pdev)
|
|||
static int sp_remove(struct platform_device *pdev)
|
||||
{
|
||||
struct net_device *dev = dev_get_drvdata(&pdev->dev);
|
||||
struct sja1000_priv *priv = netdev_priv(dev);
|
||||
struct resource *res;
|
||||
|
||||
unregister_sja1000dev(dev);
|
||||
dev_set_drvdata(&pdev->dev, NULL);
|
||||
|
||||
if (dev->base_addr)
|
||||
iounmap((void __iomem *)dev->base_addr);
|
||||
if (priv->reg_base)
|
||||
iounmap(priv->reg_base);
|
||||
|
||||
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
||||
release_mem_region(res->start, resource_size(res));
|
||||
|
|
|
@ -1879,7 +1879,6 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
|||
cpl->vlan_valid = 0;
|
||||
|
||||
send:
|
||||
dev->trans_start = jiffies;
|
||||
ret = t1_sge_tx(skb, adapter, 0, dev);
|
||||
|
||||
/* If transmit busy, and we reallocated skb's due to headroom limit,
|
||||
|
|
|
@ -615,13 +615,13 @@ static void cpmac_end_xmit(struct net_device *dev, int queue)
|
|||
|
||||
dev_kfree_skb_irq(desc->skb);
|
||||
desc->skb = NULL;
|
||||
if (netif_subqueue_stopped(dev, queue))
|
||||
if (__netif_subqueue_stopped(dev, queue))
|
||||
netif_wake_subqueue(dev, queue);
|
||||
} else {
|
||||
if (netif_msg_tx_err(priv) && net_ratelimit())
|
||||
printk(KERN_WARNING
|
||||
"%s: end_xmit: spurious interrupt\n", dev->name);
|
||||
if (netif_subqueue_stopped(dev, queue))
|
||||
if (__netif_subqueue_stopped(dev, queue))
|
||||
netif_wake_subqueue(dev, queue);
|
||||
}
|
||||
}
|
||||
|
@ -731,7 +731,6 @@ static void cpmac_clear_tx(struct net_device *dev)
|
|||
|
||||
static void cpmac_hw_error(struct work_struct *work)
|
||||
{
|
||||
int i;
|
||||
struct cpmac_priv *priv =
|
||||
container_of(work, struct cpmac_priv, reset_work);
|
||||
|
||||
|
@ -818,7 +817,6 @@ static irqreturn_t cpmac_irq(int irq, void *dev_id)
|
|||
|
||||
static void cpmac_tx_timeout(struct net_device *dev)
|
||||
{
|
||||
int i;
|
||||
struct cpmac_priv *priv = netdev_priv(dev);
|
||||
|
||||
spin_lock(&priv->lock);
|
||||
|
@ -1110,7 +1108,7 @@ static int external_switch;
|
|||
|
||||
static int __devinit cpmac_probe(struct platform_device *pdev)
|
||||
{
|
||||
int rc, phy_id, i;
|
||||
int rc, phy_id;
|
||||
char *mdio_bus_id = "0";
|
||||
struct resource *mem;
|
||||
struct cpmac_priv *priv;
|
||||
|
|
|
@ -5,4 +5,4 @@
|
|||
obj-$(CONFIG_CHELSIO_T3) += cxgb3.o
|
||||
|
||||
cxgb3-objs := cxgb3_main.o ael1002.o vsc8211.o t3_hw.o mc5.o \
|
||||
xgmac.o sge.o l2t.o cxgb3_offload.o
|
||||
xgmac.o sge.o l2t.o cxgb3_offload.o aq100x.o
|
||||
|
|
|
@ -85,8 +85,8 @@ struct fl_pg_chunk {
|
|||
struct page *page;
|
||||
void *va;
|
||||
unsigned int offset;
|
||||
u64 *p_cnt;
|
||||
DECLARE_PCI_UNMAP_ADDR(mapping);
|
||||
unsigned long *p_cnt;
|
||||
dma_addr_t mapping;
|
||||
};
|
||||
|
||||
struct rx_desc;
|
||||
|
@ -253,6 +253,8 @@ struct adapter {
|
|||
struct mutex mdio_lock;
|
||||
spinlock_t stats_lock;
|
||||
spinlock_t work_lock;
|
||||
|
||||
struct sk_buff *nofail_skb;
|
||||
};
|
||||
|
||||
static inline u32 t3_read_reg(struct adapter *adapter, u32 reg_addr)
|
||||
|
|
|
@ -44,12 +44,33 @@ enum {
|
|||
AEL_I2C_STAT = 0xc30c,
|
||||
AEL2005_GPIO_CTRL = 0xc214,
|
||||
AEL2005_GPIO_STAT = 0xc215,
|
||||
|
||||
AEL2020_GPIO_INTR = 0xc103, /* Latch High (LH) */
|
||||
AEL2020_GPIO_CTRL = 0xc108, /* Store Clear (SC) */
|
||||
AEL2020_GPIO_STAT = 0xc10c, /* Read Only (RO) */
|
||||
AEL2020_GPIO_CFG = 0xc110, /* Read Write (RW) */
|
||||
|
||||
AEL2020_GPIO_SDA = 0, /* IN: i2c serial data */
|
||||
AEL2020_GPIO_MODDET = 1, /* IN: Module Detect */
|
||||
AEL2020_GPIO_0 = 3, /* IN: unassigned */
|
||||
AEL2020_GPIO_1 = 2, /* OUT: unassigned */
|
||||
AEL2020_GPIO_LSTAT = AEL2020_GPIO_1, /* wired to link status LED */
|
||||
};
|
||||
|
||||
enum { edc_none, edc_sr, edc_twinax };
|
||||
|
||||
/* PHY module I2C device address */
|
||||
#define MODULE_DEV_ADDR 0xa0
|
||||
enum {
|
||||
MODULE_DEV_ADDR = 0xa0,
|
||||
SFF_DEV_ADDR = 0xa2,
|
||||
};
|
||||
|
||||
/* PHY transceiver type */
|
||||
enum {
|
||||
phy_transtype_unknown = 0,
|
||||
phy_transtype_sfp = 3,
|
||||
phy_transtype_xfp = 6,
|
||||
};
|
||||
|
||||
#define AEL2005_MODDET_IRQ 4
|
||||
|
||||
|
@ -86,6 +107,37 @@ static void ael100x_txon(struct cphy *phy)
|
|||
msleep(30);
|
||||
}
|
||||
|
||||
/*
|
||||
* Read an 8-bit word from a device attached to the PHY's i2c bus.
|
||||
*/
|
||||
static int ael_i2c_rd(struct cphy *phy, int dev_addr, int word_addr)
|
||||
{
|
||||
int i, err;
|
||||
unsigned int stat, data;
|
||||
|
||||
err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL_I2C_CTRL,
|
||||
(dev_addr << 8) | (1 << 8) | word_addr);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
for (i = 0; i < 200; i++) {
|
||||
msleep(1);
|
||||
err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AEL_I2C_STAT, &stat);
|
||||
if (err)
|
||||
return err;
|
||||
if ((stat & 3) == 1) {
|
||||
err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AEL_I2C_DATA,
|
||||
&data);
|
||||
if (err)
|
||||
return err;
|
||||
return data >> 8;
|
||||
}
|
||||
}
|
||||
CH_WARN(phy->adapter, "PHY %u i2c read of dev.addr %#x.%#x timed out\n",
|
||||
phy->mdio.prtad, dev_addr, word_addr);
|
||||
return -ETIMEDOUT;
|
||||
}
|
||||
|
||||
static int ael1002_power_down(struct cphy *phy, int enable)
|
||||
{
|
||||
int err;
|
||||
|
@ -199,6 +251,51 @@ int t3_ael1006_phy_prep(struct cphy *phy, struct adapter *adapter,
|
|||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Decode our module type.
|
||||
*/
|
||||
static int ael2xxx_get_module_type(struct cphy *phy, int delay_ms)
|
||||
{
|
||||
int v;
|
||||
|
||||
if (delay_ms)
|
||||
msleep(delay_ms);
|
||||
|
||||
/* see SFF-8472 for below */
|
||||
v = ael_i2c_rd(phy, MODULE_DEV_ADDR, 3);
|
||||
if (v < 0)
|
||||
return v;
|
||||
|
||||
if (v == 0x10)
|
||||
return phy_modtype_sr;
|
||||
if (v == 0x20)
|
||||
return phy_modtype_lr;
|
||||
if (v == 0x40)
|
||||
return phy_modtype_lrm;
|
||||
|
||||
v = ael_i2c_rd(phy, MODULE_DEV_ADDR, 6);
|
||||
if (v < 0)
|
||||
return v;
|
||||
if (v != 4)
|
||||
goto unknown;
|
||||
|
||||
v = ael_i2c_rd(phy, MODULE_DEV_ADDR, 10);
|
||||
if (v < 0)
|
||||
return v;
|
||||
|
||||
if (v & 0x80) {
|
||||
v = ael_i2c_rd(phy, MODULE_DEV_ADDR, 0x12);
|
||||
if (v < 0)
|
||||
return v;
|
||||
return v > 10 ? phy_modtype_twinax_long : phy_modtype_twinax;
|
||||
}
|
||||
unknown:
|
||||
return phy_modtype_unknown;
|
||||
}
|
||||
|
||||
/*
|
||||
* Code to support the Aeluros/NetLogic 2005 10Gb PHY.
|
||||
*/
|
||||
static int ael2005_setup_sr_edc(struct cphy *phy)
|
||||
{
|
||||
static struct reg_val regs[] = {
|
||||
|
@ -893,35 +990,7 @@ static int ael2005_setup_twinax_edc(struct cphy *phy, int modtype)
|
|||
return err;
|
||||
}
|
||||
|
||||
static int ael2005_i2c_rd(struct cphy *phy, int dev_addr, int word_addr)
|
||||
{
|
||||
int i, err;
|
||||
unsigned int stat, data;
|
||||
|
||||
err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL_I2C_CTRL,
|
||||
(dev_addr << 8) | (1 << 8) | word_addr);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
for (i = 0; i < 5; i++) {
|
||||
msleep(1);
|
||||
err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AEL_I2C_STAT, &stat);
|
||||
if (err)
|
||||
return err;
|
||||
if ((stat & 3) == 1) {
|
||||
err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AEL_I2C_DATA,
|
||||
&data);
|
||||
if (err)
|
||||
return err;
|
||||
return data >> 8;
|
||||
}
|
||||
}
|
||||
CH_WARN(phy->adapter, "PHY %u I2C read of addr %u timed out\n",
|
||||
phy->mdio.prtad, word_addr);
|
||||
return -ETIMEDOUT;
|
||||
}
|
||||
|
||||
static int get_module_type(struct cphy *phy, int delay_ms)
|
||||
static int ael2005_get_module_type(struct cphy *phy, int delay_ms)
|
||||
{
|
||||
int v;
|
||||
unsigned int stat;
|
||||
|
@ -933,39 +1002,7 @@ static int get_module_type(struct cphy *phy, int delay_ms)
|
|||
if (stat & (1 << 8)) /* module absent */
|
||||
return phy_modtype_none;
|
||||
|
||||
if (delay_ms)
|
||||
msleep(delay_ms);
|
||||
|
||||
/* see SFF-8472 for below */
|
||||
v = ael2005_i2c_rd(phy, MODULE_DEV_ADDR, 3);
|
||||
if (v < 0)
|
||||
return v;
|
||||
|
||||
if (v == 0x10)
|
||||
return phy_modtype_sr;
|
||||
if (v == 0x20)
|
||||
return phy_modtype_lr;
|
||||
if (v == 0x40)
|
||||
return phy_modtype_lrm;
|
||||
|
||||
v = ael2005_i2c_rd(phy, MODULE_DEV_ADDR, 6);
|
||||
if (v < 0)
|
||||
return v;
|
||||
if (v != 4)
|
||||
goto unknown;
|
||||
|
||||
v = ael2005_i2c_rd(phy, MODULE_DEV_ADDR, 10);
|
||||
if (v < 0)
|
||||
return v;
|
||||
|
||||
if (v & 0x80) {
|
||||
v = ael2005_i2c_rd(phy, MODULE_DEV_ADDR, 0x12);
|
||||
if (v < 0)
|
||||
return v;
|
||||
return v > 10 ? phy_modtype_twinax_long : phy_modtype_twinax;
|
||||
}
|
||||
unknown:
|
||||
return phy_modtype_unknown;
|
||||
return ael2xxx_get_module_type(phy, delay_ms);
|
||||
}
|
||||
|
||||
static int ael2005_intr_enable(struct cphy *phy)
|
||||
|
@ -1024,7 +1061,7 @@ static int ael2005_reset(struct cphy *phy, int wait)
|
|||
|
||||
msleep(50);
|
||||
|
||||
err = get_module_type(phy, 0);
|
||||
err = ael2005_get_module_type(phy, 0);
|
||||
if (err < 0)
|
||||
return err;
|
||||
phy->modtype = err;
|
||||
|
@ -1062,7 +1099,7 @@ static int ael2005_intr_handler(struct cphy *phy)
|
|||
return ret;
|
||||
|
||||
/* modules have max 300 ms init time after hot plug */
|
||||
ret = get_module_type(phy, 300);
|
||||
ret = ael2005_get_module_type(phy, 300);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
|
@ -1112,6 +1149,662 @@ int t3_ael2005_phy_prep(struct cphy *phy, struct adapter *adapter,
|
|||
1 << 5);
|
||||
}
|
||||
|
||||
/*
|
||||
* Setup EDC and other parameters for operation with an optical module.
|
||||
*/
|
||||
static int ael2020_setup_sr_edc(struct cphy *phy)
|
||||
{
|
||||
static struct reg_val regs[] = {
|
||||
/* set CDR offset to 10 */
|
||||
{ MDIO_MMD_PMAPMD, 0xcc01, 0xffff, 0x488a },
|
||||
|
||||
/* adjust 10G RX bias current */
|
||||
{ MDIO_MMD_PMAPMD, 0xcb1b, 0xffff, 0x0200 },
|
||||
{ MDIO_MMD_PMAPMD, 0xcb1c, 0xffff, 0x00f0 },
|
||||
{ MDIO_MMD_PMAPMD, 0xcc06, 0xffff, 0x00e0 },
|
||||
|
||||
/* end */
|
||||
{ 0, 0, 0, 0 }
|
||||
};
|
||||
int err;
|
||||
|
||||
err = set_phy_regs(phy, regs);
|
||||
msleep(50);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
phy->priv = edc_sr;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Setup EDC and other parameters for operation with a TWINAX module.
|
||||
*/
|
||||
static int ael2020_setup_twinax_edc(struct cphy *phy, int modtype)
|
||||
{
|
||||
/* set uC to 40MHz */
|
||||
static struct reg_val uCclock40MHz[] = {
|
||||
{ MDIO_MMD_PMAPMD, 0xff28, 0xffff, 0x4001 },
|
||||
{ MDIO_MMD_PMAPMD, 0xff2a, 0xffff, 0x0002 },
|
||||
{ 0, 0, 0, 0 }
|
||||
};
|
||||
|
||||
/* activate uC clock */
|
||||
static struct reg_val uCclockActivate[] = {
|
||||
{ MDIO_MMD_PMAPMD, 0xd000, 0xffff, 0x5200 },
|
||||
{ 0, 0, 0, 0 }
|
||||
};
|
||||
|
||||
/* set PC to start of SRAM and activate uC */
|
||||
static struct reg_val uCactivate[] = {
|
||||
{ MDIO_MMD_PMAPMD, 0xd080, 0xffff, 0x0100 },
|
||||
{ MDIO_MMD_PMAPMD, 0xd092, 0xffff, 0x0000 },
|
||||
{ 0, 0, 0, 0 }
|
||||
};
|
||||
|
||||
/* TWINAX EDC firmware */
|
||||
static u16 twinax_edc[] = {
|
||||
0xd800, 0x4009,
|
||||
0xd801, 0x2fff,
|
||||
0xd802, 0x300f,
|
||||
0xd803, 0x40aa,
|
||||
0xd804, 0x401c,
|
||||
0xd805, 0x401e,
|
||||
0xd806, 0x2ff4,
|
||||
0xd807, 0x3dc4,
|
||||
0xd808, 0x2035,
|
||||
0xd809, 0x3035,
|
||||
0xd80a, 0x6524,
|
||||
0xd80b, 0x2cb2,
|
||||
0xd80c, 0x3012,
|
||||
0xd80d, 0x1002,
|
||||
0xd80e, 0x26e2,
|
||||
0xd80f, 0x3022,
|
||||
0xd810, 0x1002,
|
||||
0xd811, 0x27d2,
|
||||
0xd812, 0x3022,
|
||||
0xd813, 0x1002,
|
||||
0xd814, 0x2822,
|
||||
0xd815, 0x3012,
|
||||
0xd816, 0x1002,
|
||||
0xd817, 0x2492,
|
||||
0xd818, 0x3022,
|
||||
0xd819, 0x1002,
|
||||
0xd81a, 0x2772,
|
||||
0xd81b, 0x3012,
|
||||
0xd81c, 0x1002,
|
||||
0xd81d, 0x23d2,
|
||||
0xd81e, 0x3022,
|
||||
0xd81f, 0x1002,
|
||||
0xd820, 0x22cd,
|
||||
0xd821, 0x301d,
|
||||
0xd822, 0x27f2,
|
||||
0xd823, 0x3022,
|
||||
0xd824, 0x1002,
|
||||
0xd825, 0x5553,
|
||||
0xd826, 0x0307,
|
||||
0xd827, 0x2522,
|
||||
0xd828, 0x3022,
|
||||
0xd829, 0x1002,
|
||||
0xd82a, 0x2142,
|
||||
0xd82b, 0x3012,
|
||||
0xd82c, 0x1002,
|
||||
0xd82d, 0x4016,
|
||||
0xd82e, 0x5e63,
|
||||
0xd82f, 0x0344,
|
||||
0xd830, 0x2142,
|
||||
0xd831, 0x3012,
|
||||
0xd832, 0x1002,
|
||||
0xd833, 0x400e,
|
||||
0xd834, 0x2522,
|
||||
0xd835, 0x3022,
|
||||
0xd836, 0x1002,
|
||||
0xd837, 0x2b52,
|
||||
0xd838, 0x3012,
|
||||
0xd839, 0x1002,
|
||||
0xd83a, 0x2742,
|
||||
0xd83b, 0x3022,
|
||||
0xd83c, 0x1002,
|
||||
0xd83d, 0x25e2,
|
||||
0xd83e, 0x3022,
|
||||
0xd83f, 0x1002,
|
||||
0xd840, 0x2fa4,
|
||||
0xd841, 0x3dc4,
|
||||
0xd842, 0x6624,
|
||||
0xd843, 0x414b,
|
||||
0xd844, 0x56b3,
|
||||
0xd845, 0x03c6,
|
||||
0xd846, 0x866b,
|
||||
0xd847, 0x400c,
|
||||
0xd848, 0x2712,
|
||||
0xd849, 0x3012,
|
||||
0xd84a, 0x1002,
|
||||
0xd84b, 0x2c4b,
|
||||
0xd84c, 0x309b,
|
||||
0xd84d, 0x56b3,
|
||||
0xd84e, 0x03c3,
|
||||
0xd84f, 0x866b,
|
||||
0xd850, 0x400c,
|
||||
0xd851, 0x2272,
|
||||
0xd852, 0x3022,
|
||||
0xd853, 0x1002,
|
||||
0xd854, 0x2742,
|
||||
0xd855, 0x3022,
|
||||
0xd856, 0x1002,
|
||||
0xd857, 0x25e2,
|
||||
0xd858, 0x3022,
|
||||
0xd859, 0x1002,
|
||||
0xd85a, 0x2fb4,
|
||||
0xd85b, 0x3dc4,
|
||||
0xd85c, 0x6624,
|
||||
0xd85d, 0x56b3,
|
||||
0xd85e, 0x03c3,
|
||||
0xd85f, 0x866b,
|
||||
0xd860, 0x401c,
|
||||
0xd861, 0x2c45,
|
||||
0xd862, 0x3095,
|
||||
0xd863, 0x5b53,
|
||||
0xd864, 0x2372,
|
||||
0xd865, 0x3012,
|
||||
0xd866, 0x13c2,
|
||||
0xd867, 0x5cc3,
|
||||
0xd868, 0x2712,
|
||||
0xd869, 0x3012,
|
||||
0xd86a, 0x1312,
|
||||
0xd86b, 0x2b52,
|
||||
0xd86c, 0x3012,
|
||||
0xd86d, 0x1002,
|
||||
0xd86e, 0x2742,
|
||||
0xd86f, 0x3022,
|
||||
0xd870, 0x1002,
|
||||
0xd871, 0x2582,
|
||||
0xd872, 0x3022,
|
||||
0xd873, 0x1002,
|
||||
0xd874, 0x2142,
|
||||
0xd875, 0x3012,
|
||||
0xd876, 0x1002,
|
||||
0xd877, 0x628f,
|
||||
0xd878, 0x2985,
|
||||
0xd879, 0x33a5,
|
||||
0xd87a, 0x25e2,
|
||||
0xd87b, 0x3022,
|
||||
0xd87c, 0x1002,
|
||||
0xd87d, 0x5653,
|
||||
0xd87e, 0x03d2,
|
||||
0xd87f, 0x401e,
|
||||
0xd880, 0x6f72,
|
||||
0xd881, 0x1002,
|
||||
0xd882, 0x628f,
|
||||
0xd883, 0x2304,
|
||||
0xd884, 0x3c84,
|
||||
0xd885, 0x6436,
|
||||
0xd886, 0xdff4,
|
||||
0xd887, 0x6436,
|
||||
0xd888, 0x2ff5,
|
||||
0xd889, 0x3005,
|
||||
0xd88a, 0x8656,
|
||||
0xd88b, 0xdfba,
|
||||
0xd88c, 0x56a3,
|
||||
0xd88d, 0xd05a,
|
||||
0xd88e, 0x2972,
|
||||
0xd88f, 0x3012,
|
||||
0xd890, 0x1392,
|
||||
0xd891, 0xd05a,
|
||||
0xd892, 0x56a3,
|
||||
0xd893, 0xdfba,
|
||||
0xd894, 0x0383,
|
||||
0xd895, 0x6f72,
|
||||
0xd896, 0x1002,
|
||||
0xd897, 0x2b45,
|
||||
0xd898, 0x3005,
|
||||
0xd899, 0x4178,
|
||||
0xd89a, 0x5653,
|
||||
0xd89b, 0x0384,
|
||||
0xd89c, 0x2a62,
|
||||
0xd89d, 0x3012,
|
||||
0xd89e, 0x1002,
|
||||
0xd89f, 0x2f05,
|
||||
0xd8a0, 0x3005,
|
||||
0xd8a1, 0x41c8,
|
||||
0xd8a2, 0x5653,
|
||||
0xd8a3, 0x0382,
|
||||
0xd8a4, 0x0002,
|
||||
0xd8a5, 0x4218,
|
||||
0xd8a6, 0x2474,
|
||||
0xd8a7, 0x3c84,
|
||||
0xd8a8, 0x6437,
|
||||
0xd8a9, 0xdff4,
|
||||
0xd8aa, 0x6437,
|
||||
0xd8ab, 0x2ff5,
|
||||
0xd8ac, 0x3c05,
|
||||
0xd8ad, 0x8757,
|
||||
0xd8ae, 0xb888,
|
||||
0xd8af, 0x9787,
|
||||
0xd8b0, 0xdff4,
|
||||
0xd8b1, 0x6724,
|
||||
0xd8b2, 0x866a,
|
||||
0xd8b3, 0x6f72,
|
||||
0xd8b4, 0x1002,
|
||||
0xd8b5, 0x2641,
|
||||
0xd8b6, 0x3021,
|
||||
0xd8b7, 0x1001,
|
||||
0xd8b8, 0xc620,
|
||||
0xd8b9, 0x0000,
|
||||
0xd8ba, 0xc621,
|
||||
0xd8bb, 0x0000,
|
||||
0xd8bc, 0xc622,
|
||||
0xd8bd, 0x00ce,
|
||||
0xd8be, 0xc623,
|
||||
0xd8bf, 0x007f,
|
||||
0xd8c0, 0xc624,
|
||||
0xd8c1, 0x0032,
|
||||
0xd8c2, 0xc625,
|
||||
0xd8c3, 0x0000,
|
||||
0xd8c4, 0xc627,
|
||||
0xd8c5, 0x0000,
|
||||
0xd8c6, 0xc628,
|
||||
0xd8c7, 0x0000,
|
||||
0xd8c8, 0xc62c,
|
||||
0xd8c9, 0x0000,
|
||||
0xd8ca, 0x0000,
|
||||
0xd8cb, 0x2641,
|
||||
0xd8cc, 0x3021,
|
||||
0xd8cd, 0x1001,
|
||||
0xd8ce, 0xc502,
|
||||
0xd8cf, 0x53ac,
|
||||
0xd8d0, 0xc503,
|
||||
0xd8d1, 0x2cd3,
|
||||
0xd8d2, 0xc600,
|
||||
0xd8d3, 0x2a6e,
|
||||
0xd8d4, 0xc601,
|
||||
0xd8d5, 0x2a2c,
|
||||
0xd8d6, 0xc605,
|
||||
0xd8d7, 0x5557,
|
||||
0xd8d8, 0xc60c,
|
||||
0xd8d9, 0x5400,
|
||||
0xd8da, 0xc710,
|
||||
0xd8db, 0x0700,
|
||||
0xd8dc, 0xc711,
|
||||
0xd8dd, 0x0f06,
|
||||
0xd8de, 0xc718,
|
||||
0xd8df, 0x0700,
|
||||
0xd8e0, 0xc719,
|
||||
0xd8e1, 0x0f06,
|
||||
0xd8e2, 0xc720,
|
||||
0xd8e3, 0x4700,
|
||||
0xd8e4, 0xc721,
|
||||
0xd8e5, 0x0f06,
|
||||
0xd8e6, 0xc728,
|
||||
0xd8e7, 0x0700,
|
||||
0xd8e8, 0xc729,
|
||||
0xd8e9, 0x1207,
|
||||
0xd8ea, 0xc801,
|
||||
0xd8eb, 0x7f50,
|
||||
0xd8ec, 0xc802,
|
||||
0xd8ed, 0x7760,
|
||||
0xd8ee, 0xc803,
|
||||
0xd8ef, 0x7fce,
|
||||
0xd8f0, 0xc804,
|
||||
0xd8f1, 0x520e,
|
||||
0xd8f2, 0xc805,
|
||||
0xd8f3, 0x5c11,
|
||||
0xd8f4, 0xc806,
|
||||
0xd8f5, 0x3c51,
|
||||
0xd8f6, 0xc807,
|
||||
0xd8f7, 0x4061,
|
||||
0xd8f8, 0xc808,
|
||||
0xd8f9, 0x49c1,
|
||||
0xd8fa, 0xc809,
|
||||
0xd8fb, 0x3840,
|
||||
0xd8fc, 0xc80a,
|
||||
0xd8fd, 0x0000,
|
||||
0xd8fe, 0xc821,
|
||||
0xd8ff, 0x0002,
|
||||
0xd900, 0xc822,
|
||||
0xd901, 0x0046,
|
||||
0xd902, 0xc844,
|
||||
0xd903, 0x182f,
|
||||
0xd904, 0xc013,
|
||||
0xd905, 0xf341,
|
||||
0xd906, 0xc084,
|
||||
0xd907, 0x0030,
|
||||
0xd908, 0xc904,
|
||||
0xd909, 0x1401,
|
||||
0xd90a, 0xcb0c,
|
||||
0xd90b, 0x0004,
|
||||
0xd90c, 0xcb0e,
|
||||
0xd90d, 0xa00a,
|
||||
0xd90e, 0xcb0f,
|
||||
0xd90f, 0xc0c0,
|
||||
0xd910, 0xcb10,
|
||||
0xd911, 0xc0c0,
|
||||
0xd912, 0xcb11,
|
||||
0xd913, 0x00a0,
|
||||
0xd914, 0xcb12,
|
||||
0xd915, 0x0007,
|
||||
0xd916, 0xc241,
|
||||
0xd917, 0xa000,
|
||||
0xd918, 0xc243,
|
||||
0xd919, 0x7fe0,
|
||||
0xd91a, 0xc604,
|
||||
0xd91b, 0x000e,
|
||||
0xd91c, 0xc609,
|
||||
0xd91d, 0x00f5,
|
||||
0xd91e, 0xc611,
|
||||
0xd91f, 0x000e,
|
||||
0xd920, 0xc660,
|
||||
0xd921, 0x9600,
|
||||
0xd922, 0xc687,
|
||||
0xd923, 0x0004,
|
||||
0xd924, 0xc60a,
|
||||
0xd925, 0x04f5,
|
||||
0xd926, 0x0000,
|
||||
0xd927, 0x2641,
|
||||
0xd928, 0x3021,
|
||||
0xd929, 0x1001,
|
||||
0xd92a, 0xc620,
|
||||
0xd92b, 0x14e5,
|
||||
0xd92c, 0xc621,
|
||||
0xd92d, 0xc53d,
|
||||
0xd92e, 0xc622,
|
||||
0xd92f, 0x3cbe,
|
||||
0xd930, 0xc623,
|
||||
0xd931, 0x4452,
|
||||
0xd932, 0xc624,
|
||||
0xd933, 0xc5c5,
|
||||
0xd934, 0xc625,
|
||||
0xd935, 0xe01e,
|
||||
0xd936, 0xc627,
|
||||
0xd937, 0x0000,
|
||||
0xd938, 0xc628,
|
||||
0xd939, 0x0000,
|
||||
0xd93a, 0xc62c,
|
||||
0xd93b, 0x0000,
|
||||
0xd93c, 0x0000,
|
||||
0xd93d, 0x2b84,
|
||||
0xd93e, 0x3c74,
|
||||
0xd93f, 0x6435,
|
||||
0xd940, 0xdff4,
|
||||
0xd941, 0x6435,
|
||||
0xd942, 0x2806,
|
||||
0xd943, 0x3006,
|
||||
0xd944, 0x8565,
|
||||
0xd945, 0x2b24,
|
||||
0xd946, 0x3c24,
|
||||
0xd947, 0x6436,
|
||||
0xd948, 0x1002,
|
||||
0xd949, 0x2b24,
|
||||
0xd94a, 0x3c24,
|
||||
0xd94b, 0x6436,
|
||||
0xd94c, 0x4045,
|
||||
0xd94d, 0x8656,
|
||||
0xd94e, 0x5663,
|
||||
0xd94f, 0x0302,
|
||||
0xd950, 0x401e,
|
||||
0xd951, 0x1002,
|
||||
0xd952, 0x2807,
|
||||
0xd953, 0x31a7,
|
||||
0xd954, 0x20c4,
|
||||
0xd955, 0x3c24,
|
||||
0xd956, 0x6724,
|
||||
0xd957, 0x1002,
|
||||
0xd958, 0x2807,
|
||||
0xd959, 0x3187,
|
||||
0xd95a, 0x20c4,
|
||||
0xd95b, 0x3c24,
|
||||
0xd95c, 0x6724,
|
||||
0xd95d, 0x1002,
|
||||
0xd95e, 0x24f4,
|
||||
0xd95f, 0x3c64,
|
||||
0xd960, 0x6436,
|
||||
0xd961, 0xdff4,
|
||||
0xd962, 0x6436,
|
||||
0xd963, 0x1002,
|
||||
0xd964, 0x2006,
|
||||
0xd965, 0x3d76,
|
||||
0xd966, 0xc161,
|
||||
0xd967, 0x6134,
|
||||
0xd968, 0x6135,
|
||||
0xd969, 0x5443,
|
||||
0xd96a, 0x0303,
|
||||
0xd96b, 0x6524,
|
||||
0xd96c, 0x00fb,
|
||||
0xd96d, 0x1002,
|
||||
0xd96e, 0x20d4,
|
||||
0xd96f, 0x3c24,
|
||||
0xd970, 0x2025,
|
||||
0xd971, 0x3005,
|
||||
0xd972, 0x6524,
|
||||
0xd973, 0x1002,
|
||||
0xd974, 0xd019,
|
||||
0xd975, 0x2104,
|
||||
0xd976, 0x3c24,
|
||||
0xd977, 0x2105,
|
||||
0xd978, 0x3805,
|
||||
0xd979, 0x6524,
|
||||
0xd97a, 0xdff4,
|
||||
0xd97b, 0x4005,
|
||||
0xd97c, 0x6524,
|
||||
0xd97d, 0x2e8d,
|
||||
0xd97e, 0x303d,
|
||||
0xd97f, 0x2408,
|
||||
0xd980, 0x35d8,
|
||||
0xd981, 0x5dd3,
|
||||
0xd982, 0x0307,
|
||||
0xd983, 0x8887,
|
||||
0xd984, 0x63a7,
|
||||
0xd985, 0x8887,
|
||||
0xd986, 0x63a7,
|
||||
0xd987, 0xdffd,
|
||||
0xd988, 0x00f9,
|
||||
0xd989, 0x1002,
|
||||
0xd98a, 0x0000,
|
||||
};
|
||||
int i, err;
|
||||
|
||||
/* set uC clock and activate it */
|
||||
err = set_phy_regs(phy, uCclock40MHz);
|
||||
msleep(500);
|
||||
if (err)
|
||||
return err;
|
||||
err = set_phy_regs(phy, uCclockActivate);
|
||||
msleep(500);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
/* write TWINAX EDC firmware into PHY */
|
||||
for (i = 0; i < ARRAY_SIZE(twinax_edc) && !err; i += 2)
|
||||
err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, twinax_edc[i],
|
||||
twinax_edc[i + 1]);
|
||||
/* activate uC */
|
||||
err = set_phy_regs(phy, uCactivate);
|
||||
if (!err)
|
||||
phy->priv = edc_twinax;
|
||||
return err;
|
||||
}
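/*
 * Illustrative userspace sketch (not part of this patch) of the EDC download
 * pattern used above: the firmware image is a flat array of (register, value)
 * u16 pairs, and the driver walks it two entries at a time, stopping at the
 * first write error.  mdio_write_stub() and the table contents below are
 * made-up stand-ins, not the real MDIO accessor or firmware data.
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

static int mdio_write_stub(uint16_t reg, uint16_t val)
{
	printf("MDIO write 0x%04x <- 0x%04x\n", reg, val);
	return 0;			/* 0 on success, like the driver helper */
}

int main(void)
{
	static const uint16_t edc[] = {	/* placeholder pairs only */
		0xd800, 0x1234,
		0xd801, 0x5678,
	};
	size_t i;
	int err = 0;

	for (i = 0; i < sizeof(edc) / sizeof(edc[0]) && !err; i += 2)
		err = mdio_write_stub(edc[i], edc[i + 1]);

	return err;
}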
|
||||
|
||||
/*
|
||||
* Return Module Type.
|
||||
*/
|
||||
static int ael2020_get_module_type(struct cphy *phy, int delay_ms)
|
||||
{
|
||||
int v;
|
||||
unsigned int stat;
|
||||
|
||||
v = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AEL2020_GPIO_STAT, &stat);
|
||||
if (v)
|
||||
return v;
|
||||
|
||||
if (stat & (0x1 << (AEL2020_GPIO_MODDET*4))) {
|
||||
/* module absent */
|
||||
return phy_modtype_none;
|
||||
}
|
||||
|
||||
return ael2xxx_get_module_type(phy, delay_ms);
|
||||
}
|
||||
|
||||
/*
|
||||
* Enable PHY interrupts. We enable "Module Detection" interrupts (on any
|
||||
* state transition) and then generic Link Alarm Status Interrupt (LASI).
|
||||
*/
|
||||
static int ael2020_intr_enable(struct cphy *phy)
|
||||
{
|
||||
int err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL2020_GPIO_CTRL,
|
||||
0x2 << (AEL2020_GPIO_MODDET*4));
|
||||
return err ? err : t3_phy_lasi_intr_enable(phy);
|
||||
}
|
||||
|
||||
/*
|
||||
* Disable PHY interrupts. The mirror of the above ...
|
||||
*/
|
||||
static int ael2020_intr_disable(struct cphy *phy)
|
||||
{
|
||||
int err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL2020_GPIO_CTRL,
|
||||
0x1 << (AEL2020_GPIO_MODDET*4));
|
||||
return err ? err : t3_phy_lasi_intr_disable(phy);
|
||||
}
|
||||
|
||||
/*
|
||||
* Clear PHY interrupt state.
|
||||
*/
|
||||
static int ael2020_intr_clear(struct cphy *phy)
|
||||
{
|
||||
/*
|
||||
* The GPIO Interrupt register on the AEL2020 is a "Latching High"
|
||||
* (LH) register which is cleared to the current state when it's read.
|
||||
* Thus, we simply read the register and discard the result.
|
||||
*/
|
||||
unsigned int stat;
|
||||
int err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AEL2020_GPIO_INTR, &stat);
|
||||
return err ? err : t3_phy_lasi_intr_clear(phy);
|
||||
}
|
||||
|
||||
/*
|
||||
* Reset the PHY and put it into a canonical operating state.
|
||||
*/
|
||||
static int ael2020_reset(struct cphy *phy, int wait)
|
||||
{
|
||||
static struct reg_val regs0[] = {
|
||||
/* Erratum #2: CDRLOL asserted, causing PMA link down status */
|
||||
{ MDIO_MMD_PMAPMD, 0xc003, 0xffff, 0x3101 },
|
||||
|
||||
/* force XAUI to send LF when RX_LOS is asserted */
|
||||
{ MDIO_MMD_PMAPMD, 0xcd40, 0xffff, 0x0001 },
|
||||
|
||||
/* RX_LOS pin is active high */
|
||||
{ MDIO_MMD_PMAPMD, AEL_OPT_SETTINGS,
|
||||
0x0020, 0x0020 },
|
||||
|
||||
/* output Module's Loss Of Signal (LOS) to LED */
|
||||
{ MDIO_MMD_PMAPMD, AEL2020_GPIO_CFG+AEL2020_GPIO_LSTAT,
|
||||
0xffff, 0x0004 },
|
||||
{ MDIO_MMD_PMAPMD, AEL2020_GPIO_CTRL,
|
||||
0xffff, 0x8 << (AEL2020_GPIO_LSTAT*4) },
|
||||
|
||||
/* end */
|
||||
{ 0, 0, 0, 0 }
|
||||
};
|
||||
int err;
|
||||
unsigned int lasi_ctrl;
|
||||
|
||||
/* grab current interrupt state */
|
||||
err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL,
|
||||
&lasi_ctrl);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
err = t3_phy_reset(phy, MDIO_MMD_PMAPMD, 125);
|
||||
if (err)
|
||||
return err;
|
||||
msleep(100);
|
||||
|
||||
/* basic initialization for all module types */
|
||||
phy->priv = edc_none;
|
||||
err = set_phy_regs(phy, regs0);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
/* determine module type and perform appropriate initialization */
|
||||
err = ael2020_get_module_type(phy, 0);
|
||||
if (err < 0)
|
||||
return err;
|
||||
phy->modtype = (u8)err;
|
||||
if (err == phy_modtype_twinax || err == phy_modtype_twinax_long)
|
||||
err = ael2020_setup_twinax_edc(phy, err);
|
||||
else
|
||||
err = ael2020_setup_sr_edc(phy);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
/* reset wipes out interrupts, reenable them if they were on */
|
||||
if (lasi_ctrl & 1)
|
||||
err = ael2005_intr_enable(phy);
|
||||
return err;
|
||||
}
|
||||
|
||||
/*
|
||||
* Handle a PHY interrupt.
|
||||
*/
|
||||
static int ael2020_intr_handler(struct cphy *phy)
|
||||
{
|
||||
unsigned int stat;
|
||||
int ret, edc_needed, cause = 0;
|
||||
|
||||
ret = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AEL2020_GPIO_INTR, &stat);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (stat & (0x1 << AEL2020_GPIO_MODDET)) {
|
||||
/* modules have max 300 ms init time after hot plug */
|
||||
ret = ael2020_get_module_type(phy, 300);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
phy->modtype = (u8)ret;
|
||||
if (ret == phy_modtype_none)
|
||||
edc_needed = phy->priv; /* on unplug retain EDC */
|
||||
else if (ret == phy_modtype_twinax ||
|
||||
ret == phy_modtype_twinax_long)
|
||||
edc_needed = edc_twinax;
|
||||
else
|
||||
edc_needed = edc_sr;
|
||||
|
||||
if (edc_needed != phy->priv) {
|
||||
ret = ael2020_reset(phy, 0);
|
||||
return ret ? ret : cphy_cause_module_change;
|
||||
}
|
||||
cause = cphy_cause_module_change;
|
||||
}
|
||||
|
||||
ret = t3_phy_lasi_intr_handler(phy);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
ret |= cause;
|
||||
return ret ? ret : cphy_cause_link_change;
|
||||
}
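/*
 * Condensed sketch of the decision made in ael2020_intr_handler() above:
 * given the newly detected module type and the EDC image currently loaded,
 * decide whether a full PHY reset (to load a different EDC) is needed.
 * The enum values and function names are illustrative stand-ins for the
 * driver's constants, not the driver API itself.
 */
#include <stdio.h>

enum edc_image { EDC_NONE, EDC_SR, EDC_TWINAX };
enum module_type { MOD_NONE, MOD_SR, MOD_TWINAX, MOD_TWINAX_LONG };

static enum edc_image edc_for_module(enum module_type mod, enum edc_image cur)
{
	if (mod == MOD_NONE)
		return cur;		/* on unplug, keep the loaded EDC */
	if (mod == MOD_TWINAX || mod == MOD_TWINAX_LONG)
		return EDC_TWINAX;
	return EDC_SR;
}

int main(void)
{
	enum edc_image loaded = EDC_SR;
	enum module_type detected = MOD_TWINAX;
	int need_reset = edc_for_module(detected, loaded) != loaded;

	printf("need_reset=%d\n", need_reset);
	return 0;
}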
|
||||
|
||||
static struct cphy_ops ael2020_ops = {
|
||||
.reset = ael2020_reset,
|
||||
.intr_enable = ael2020_intr_enable,
|
||||
.intr_disable = ael2020_intr_disable,
|
||||
.intr_clear = ael2020_intr_clear,
|
||||
.intr_handler = ael2020_intr_handler,
|
||||
.get_link_status = get_link_status_r,
|
||||
.power_down = ael1002_power_down,
|
||||
.mmds = MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS | MDIO_DEVS_PHYXS,
|
||||
};
|
||||
|
||||
int t3_ael2020_phy_prep(struct cphy *phy, struct adapter *adapter, int phy_addr,
|
||||
const struct mdio_ops *mdio_ops)
|
||||
{
|
||||
cphy_init(phy, adapter, phy_addr, &ael2020_ops, mdio_ops,
|
||||
SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_FIBRE |
|
||||
SUPPORTED_IRQ, "10GBASE-R");
|
||||
msleep(125);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Get link status for a 10GBASE-X device.
|
||||
*/
|
||||
|
|
355
drivers/net/cxgb3/aq100x.c
Normal file

|
@ -0,0 +1,355 @@
|
|||
/*
|
||||
* Copyright (c) 2005-2008 Chelsio, Inc. All rights reserved.
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
* licenses. You may choose to be licensed under the terms of the GNU
|
||||
* General Public License (GPL) Version 2, available from the file
|
||||
* COPYING in the main directory of this source tree, or the
|
||||
* OpenIB.org BSD license below:
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or
|
||||
* without modification, are permitted provided that the following
|
||||
* conditions are met:
|
||||
*
|
||||
* - Redistributions of source code must retain the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer.
|
||||
*
|
||||
* - Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer in the documentation and/or other materials
|
||||
* provided with the distribution.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*/
|
||||
|
||||
#include "common.h"
|
||||
#include "regs.h"
|
||||
|
||||
enum {
|
||||
/* MDIO_DEV_PMA_PMD registers */
|
||||
AQ_LINK_STAT = 0xe800,
|
||||
AQ_IMASK_PMA = 0xf000,
|
||||
|
||||
/* MDIO_DEV_XGXS registers */
|
||||
AQ_XAUI_RX_CFG = 0xc400,
|
||||
AQ_XAUI_TX_CFG = 0xe400,
|
||||
|
||||
/* MDIO_DEV_ANEG registers */
|
||||
AQ_1G_CTRL = 0xc400,
|
||||
AQ_ANEG_STAT = 0xc800,
|
||||
|
||||
/* MDIO_DEV_VEND1 registers */
|
||||
AQ_FW_VERSION = 0x0020,
|
||||
AQ_IFLAG_GLOBAL = 0xfc00,
|
||||
AQ_IMASK_GLOBAL = 0xff00,
|
||||
};
|
||||
|
||||
enum {
|
||||
IMASK_PMA = 1 << 2,
|
||||
IMASK_GLOBAL = 1 << 15,
|
||||
ADV_1G_FULL = 1 << 15,
|
||||
ADV_1G_HALF = 1 << 14,
|
||||
ADV_10G_FULL = 1 << 12,
|
||||
AQ_RESET = (1 << 14) | (1 << 15),
|
||||
AQ_LOWPOWER = 1 << 12,
|
||||
};
|
||||
|
||||
static int aq100x_reset(struct cphy *phy, int wait)
|
||||
{
|
||||
/*
|
||||
* Ignore the caller specified wait time; always wait for the reset to
|
||||
* complete. Can take up to 3s.
|
||||
*/
|
||||
int err = t3_phy_reset(phy, MDIO_MMD_VEND1, 3000);
|
||||
|
||||
if (err)
|
||||
CH_WARN(phy->adapter, "PHY%d: reset failed (0x%x).\n",
|
||||
phy->mdio.prtad, err);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static int aq100x_intr_enable(struct cphy *phy)
|
||||
{
|
||||
int err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AQ_IMASK_PMA, IMASK_PMA);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
err = t3_mdio_write(phy, MDIO_MMD_VEND1, AQ_IMASK_GLOBAL, IMASK_GLOBAL);
|
||||
return err;
|
||||
}
|
||||
|
||||
static int aq100x_intr_disable(struct cphy *phy)
|
||||
{
|
||||
return t3_mdio_write(phy, MDIO_MMD_VEND1, AQ_IMASK_GLOBAL, 0);
|
||||
}
|
||||
|
||||
static int aq100x_intr_clear(struct cphy *phy)
|
||||
{
|
||||
unsigned int v;
|
||||
|
||||
t3_mdio_read(phy, MDIO_MMD_VEND1, AQ_IFLAG_GLOBAL, &v);
|
||||
t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_STAT1, &v);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int aq100x_intr_handler(struct cphy *phy)
|
||||
{
|
||||
int err;
|
||||
unsigned int cause, v;
|
||||
|
||||
err = t3_mdio_read(phy, MDIO_MMD_VEND1, AQ_IFLAG_GLOBAL, &cause);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
/* Read (and reset) the latching version of the status */
|
||||
t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_STAT1, &v);
|
||||
|
||||
return cphy_cause_link_change;
|
||||
}
|
||||
|
||||
static int aq100x_power_down(struct cphy *phy, int off)
|
||||
{
|
||||
return mdio_set_flag(&phy->mdio, phy->mdio.prtad,
|
||||
MDIO_MMD_PMAPMD, MDIO_CTRL1,
|
||||
MDIO_CTRL1_LPOWER, off);
|
||||
}
|
||||
|
||||
static int aq100x_autoneg_enable(struct cphy *phy)
|
||||
{
|
||||
int err;
|
||||
|
||||
err = aq100x_power_down(phy, 0);
|
||||
if (!err)
|
||||
err = mdio_set_flag(&phy->mdio, phy->mdio.prtad,
|
||||
MDIO_MMD_AN, MDIO_CTRL1,
|
||||
BMCR_ANENABLE | BMCR_ANRESTART, 1);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static int aq100x_autoneg_restart(struct cphy *phy)
|
||||
{
|
||||
int err;
|
||||
|
||||
err = aq100x_power_down(phy, 0);
|
||||
if (!err)
|
||||
err = mdio_set_flag(&phy->mdio, phy->mdio.prtad,
|
||||
MDIO_MMD_AN, MDIO_CTRL1,
|
||||
BMCR_ANENABLE | BMCR_ANRESTART, 1);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static int aq100x_advertise(struct cphy *phy, unsigned int advertise_map)
|
||||
{
|
||||
unsigned int adv;
|
||||
int err;
|
||||
|
||||
/* 10G advertisement */
|
||||
adv = 0;
|
||||
if (advertise_map & ADVERTISED_10000baseT_Full)
|
||||
adv |= ADV_10G_FULL;
|
||||
err = t3_mdio_change_bits(phy, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL,
|
||||
ADV_10G_FULL, adv);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
/* 1G advertisement */
|
||||
adv = 0;
|
||||
if (advertise_map & ADVERTISED_1000baseT_Full)
|
||||
adv |= ADV_1G_FULL;
|
||||
if (advertise_map & ADVERTISED_1000baseT_Half)
|
||||
adv |= ADV_1G_HALF;
|
||||
err = t3_mdio_change_bits(phy, MDIO_MMD_AN, AQ_1G_CTRL,
|
||||
ADV_1G_FULL | ADV_1G_HALF, adv);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
/* 100M, pause advertisement */
|
||||
adv = 0;
|
||||
if (advertise_map & ADVERTISED_100baseT_Half)
|
||||
adv |= ADVERTISE_100HALF;
|
||||
if (advertise_map & ADVERTISED_100baseT_Full)
|
||||
adv |= ADVERTISE_100FULL;
|
||||
if (advertise_map & ADVERTISED_Pause)
|
||||
adv |= ADVERTISE_PAUSE_CAP;
|
||||
if (advertise_map & ADVERTISED_Asym_Pause)
|
||||
adv |= ADVERTISE_PAUSE_ASYM;
|
||||
err = t3_mdio_change_bits(phy, MDIO_MMD_AN, MDIO_AN_ADVERTISE,
|
||||
0xfe0, adv);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static int aq100x_set_loopback(struct cphy *phy, int mmd, int dir, int enable)
|
||||
{
|
||||
return mdio_set_flag(&phy->mdio, phy->mdio.prtad,
|
||||
MDIO_MMD_PMAPMD, MDIO_CTRL1,
|
||||
BMCR_LOOPBACK, enable);
|
||||
}
|
||||
|
||||
static int aq100x_set_speed_duplex(struct cphy *phy, int speed, int duplex)
|
||||
{
|
||||
/* no can do */
|
||||
return -1;
|
||||
}
|
||||
|
||||
static int aq100x_get_link_status(struct cphy *phy, int *link_ok,
|
||||
int *speed, int *duplex, int *fc)
|
||||
{
|
||||
int err;
|
||||
unsigned int v;
|
||||
|
||||
if (link_ok) {
|
||||
err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AQ_LINK_STAT, &v);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
*link_ok = v & 1;
|
||||
if (!*link_ok)
|
||||
return 0;
|
||||
}
|
||||
|
||||
err = t3_mdio_read(phy, MDIO_MMD_AN, AQ_ANEG_STAT, &v);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
if (speed) {
|
||||
switch (v & 0x6) {
|
||||
case 0x6:
|
||||
*speed = SPEED_10000;
|
||||
break;
|
||||
case 0x4:
|
||||
*speed = SPEED_1000;
|
||||
break;
|
||||
case 0x2:
|
||||
*speed = SPEED_100;
|
||||
break;
|
||||
case 0x0:
|
||||
*speed = SPEED_10;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (duplex)
|
||||
*duplex = v & 1 ? DUPLEX_FULL : DUPLEX_HALF;
|
||||
|
||||
return 0;
|
||||
}
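/*
 * Minimal sketch (not driver code) of the speed decode used above: bits 2:1
 * of the autonegotiation status word select 10M/100M/1G/10G.  The switch
 * mirrors what the driver does with (v & 0x6); the function name is
 * illustrative only.
 */
#include <stdio.h>

static int aq_decode_speed_mbps(unsigned int v)
{
	switch (v & 0x6) {
	case 0x6: return 10000;
	case 0x4: return 1000;
	case 0x2: return 100;
	default:  return 10;	/* 0x0 */
	}
}

int main(void)
{
	unsigned int v;

	for (v = 0; v <= 0x6; v += 2)
		printf("status bits 0x%x -> %d Mb/s\n", v, aq_decode_speed_mbps(v));
	return 0;
}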
|
||||
|
||||
static struct cphy_ops aq100x_ops = {
|
||||
.reset = aq100x_reset,
|
||||
.intr_enable = aq100x_intr_enable,
|
||||
.intr_disable = aq100x_intr_disable,
|
||||
.intr_clear = aq100x_intr_clear,
|
||||
.intr_handler = aq100x_intr_handler,
|
||||
.autoneg_enable = aq100x_autoneg_enable,
|
||||
.autoneg_restart = aq100x_autoneg_restart,
|
||||
.advertise = aq100x_advertise,
|
||||
.set_loopback = aq100x_set_loopback,
|
||||
.set_speed_duplex = aq100x_set_speed_duplex,
|
||||
.get_link_status = aq100x_get_link_status,
|
||||
.power_down = aq100x_power_down,
|
||||
.mmds = MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS | MDIO_DEVS_PHYXS,
|
||||
};
|
||||
|
||||
int t3_aq100x_phy_prep(struct cphy *phy, struct adapter *adapter, int phy_addr,
|
||||
const struct mdio_ops *mdio_ops)
|
||||
{
|
||||
unsigned int v, v2, gpio, wait;
|
||||
int err;
|
||||
|
||||
cphy_init(phy, adapter, phy_addr, &aq100x_ops, mdio_ops,
|
||||
SUPPORTED_1000baseT_Full | SUPPORTED_10000baseT_Full |
|
||||
SUPPORTED_Autoneg | SUPPORTED_AUI, "1000/10GBASE-T");
|
||||
|
||||
/*
|
||||
* The PHY has been out of reset ever since the system powered up. So
|
||||
* we do a hard reset here.
|
||||
*/
|
||||
gpio = phy_addr ? F_GPIO10_OUT_VAL : F_GPIO6_OUT_VAL;
|
||||
t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, gpio, 0);
|
||||
msleep(1);
|
||||
t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, gpio, gpio);
|
||||
|
||||
/*
|
||||
* Give it enough time to load the firmware and get ready for mdio.
|
||||
*/
|
||||
msleep(1000);
|
||||
wait = 500; /* in 10ms increments */
|
||||
do {
|
||||
err = t3_mdio_read(phy, MDIO_MMD_VEND1, MDIO_CTRL1, &v);
|
||||
if (err || v == 0xffff) {
|
||||
|
||||
/* Allow prep_adapter to succeed when ffff is read */
|
||||
|
||||
CH_WARN(adapter, "PHY%d: reset failed (0x%x, 0x%x).\n",
|
||||
phy_addr, err, v);
|
||||
goto done;
|
||||
}
|
||||
|
||||
v &= AQ_RESET;
|
||||
if (v)
|
||||
msleep(10);
|
||||
} while (v && --wait);
|
||||
if (v) {
|
||||
CH_WARN(adapter, "PHY%d: reset timed out (0x%x).\n",
|
||||
phy_addr, v);
|
||||
|
||||
goto done; /* let prep_adapter succeed */
|
||||
}
|
||||
|
||||
/* Datasheet says 3s max, but longer resets have been observed */
|
||||
wait = (500 - wait) * 10 + 1000;
|
||||
if (wait > 3000)
|
||||
CH_WARN(adapter, "PHY%d: reset took %ums\n", phy_addr, wait);
|
||||
|
||||
/* Firmware version check. */
|
||||
t3_mdio_read(phy, MDIO_MMD_VEND1, AQ_FW_VERSION, &v);
|
||||
if (v != 30) {
|
||||
CH_WARN(adapter, "PHY%d: unsupported firmware %d\n",
|
||||
phy_addr, v);
|
||||
return 0; /* allow t3_prep_adapter to succeed */
|
||||
}
|
||||
|
||||
/*
|
||||
* The PHY should start in really-low-power mode. Prepare it for normal
|
||||
* operations.
|
||||
*/
|
||||
err = t3_mdio_read(phy, MDIO_MMD_VEND1, MDIO_CTRL1, &v);
|
||||
if (err)
|
||||
return err;
|
||||
if (v & AQ_LOWPOWER) {
|
||||
err = t3_mdio_change_bits(phy, MDIO_MMD_VEND1, MDIO_CTRL1,
|
||||
AQ_LOWPOWER, 0);
|
||||
if (err)
|
||||
return err;
|
||||
msleep(10);
|
||||
} else
|
||||
CH_WARN(adapter, "PHY%d does not start in low power mode.\n",
|
||||
phy_addr);
|
||||
|
||||
/*
|
||||
* Verify XAUI settings, but let prep succeed no matter what.
|
||||
*/
|
||||
v = v2 = 0;
|
||||
t3_mdio_read(phy, MDIO_MMD_PHYXS, AQ_XAUI_RX_CFG, &v);
|
||||
t3_mdio_read(phy, MDIO_MMD_PHYXS, AQ_XAUI_TX_CFG, &v2);
|
||||
if (v != 0x1b || v2 != 0x1b)
|
||||
CH_WARN(adapter,
|
||||
"PHY%d: incorrect XAUI settings (0x%x, 0x%x).\n",
|
||||
phy_addr, v, v2);
|
||||
|
||||
done:
|
||||
return err;
|
||||
}
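/*
 * Userspace sketch of the reset-polling idiom in t3_aq100x_phy_prep() above:
 * poll a register in fixed 10 ms increments until the reset bits clear or a
 * budget of iterations runs out.  read_reg_stub() and usleep() stand in for
 * the driver's MDIO read and msleep(); AQ_RESET matches the mask defined
 * earlier in this file.
 */
#include <stdio.h>
#include <unistd.h>

#define AQ_RESET	((1 << 14) | (1 << 15))

static unsigned int read_reg_stub(void)
{
	static int calls;
	/* pretend the PHY drops its reset bits after a few polls */
	return ++calls < 5 ? AQ_RESET : 0;
}

int main(void)
{
	unsigned int v, wait = 500;	/* budget, in 10 ms increments */

	do {
		v = read_reg_stub() & AQ_RESET;
		if (v)
			usleep(10 * 1000);
	} while (v && --wait);

	if (v)
		fprintf(stderr, "reset timed out\n");
	else
		printf("reset deasserted after %u x 10 ms\n", 500 - wait);
	return v ? 1 : 0;
}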
|
|
@ -802,8 +802,12 @@ int t3_ael1006_phy_prep(struct cphy *phy, struct adapter *adapter,
|
|||
int phy_addr, const struct mdio_ops *mdio_ops);
|
||||
int t3_ael2005_phy_prep(struct cphy *phy, struct adapter *adapter,
|
||||
int phy_addr, const struct mdio_ops *mdio_ops);
|
||||
int t3_ael2020_phy_prep(struct cphy *phy, struct adapter *adapter,
|
||||
int phy_addr, const struct mdio_ops *mdio_ops);
|
||||
int t3_qt2045_phy_prep(struct cphy *phy, struct adapter *adapter, int phy_addr,
|
||||
const struct mdio_ops *mdio_ops);
|
||||
int t3_xaui_direct_phy_prep(struct cphy *phy, struct adapter *adapter,
|
||||
int phy_addr, const struct mdio_ops *mdio_ops);
|
||||
int t3_aq100x_phy_prep(struct cphy *phy, struct adapter *adapter,
|
||||
int phy_addr, const struct mdio_ops *mdio_ops);
|
||||
#endif /* __CHELSIO_COMMON_H */
|
||||
|
|
|
@ -91,6 +91,8 @@ static const struct pci_device_id cxgb3_pci_tbl[] = {
|
|||
CH_DEVICE(0x31, 3), /* T3B20 */
|
||||
CH_DEVICE(0x32, 1), /* T3B02 */
|
||||
CH_DEVICE(0x35, 6), /* T3C20-derived T3C10 */
|
||||
CH_DEVICE(0x36, 3), /* S320E-CR */
|
||||
CH_DEVICE(0x37, 7), /* N320E-G2 */
|
||||
{0,}
|
||||
};
|
||||
|
||||
|
@ -431,40 +433,78 @@ static int init_tp_parity(struct adapter *adap)
|
|||
for (i = 0; i < 16; i++) {
|
||||
struct cpl_smt_write_req *req;
|
||||
|
||||
skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
|
||||
skb = alloc_skb(sizeof(*req), GFP_KERNEL);
|
||||
if (!skb)
|
||||
skb = adap->nofail_skb;
|
||||
if (!skb)
|
||||
goto alloc_skb_fail;
|
||||
|
||||
req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
|
||||
memset(req, 0, sizeof(*req));
|
||||
req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
|
||||
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
|
||||
req->iff = i;
|
||||
t3_mgmt_tx(adap, skb);
|
||||
if (skb == adap->nofail_skb) {
|
||||
await_mgmt_replies(adap, cnt, i + 1);
|
||||
adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
|
||||
if (!adap->nofail_skb)
|
||||
goto alloc_skb_fail;
|
||||
}
|
||||
}
|
||||
|
||||
for (i = 0; i < 2048; i++) {
|
||||
struct cpl_l2t_write_req *req;
|
||||
|
||||
skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
|
||||
skb = alloc_skb(sizeof(*req), GFP_KERNEL);
|
||||
if (!skb)
|
||||
skb = adap->nofail_skb;
|
||||
if (!skb)
|
||||
goto alloc_skb_fail;
|
||||
|
||||
req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
|
||||
memset(req, 0, sizeof(*req));
|
||||
req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
|
||||
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
|
||||
req->params = htonl(V_L2T_W_IDX(i));
|
||||
t3_mgmt_tx(adap, skb);
|
||||
if (skb == adap->nofail_skb) {
|
||||
await_mgmt_replies(adap, cnt, 16 + i + 1);
|
||||
adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
|
||||
if (!adap->nofail_skb)
|
||||
goto alloc_skb_fail;
|
||||
}
|
||||
}
|
||||
|
||||
for (i = 0; i < 2048; i++) {
|
||||
struct cpl_rte_write_req *req;
|
||||
|
||||
skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
|
||||
skb = alloc_skb(sizeof(*req), GFP_KERNEL);
|
||||
if (!skb)
|
||||
skb = adap->nofail_skb;
|
||||
if (!skb)
|
||||
goto alloc_skb_fail;
|
||||
|
||||
req = (struct cpl_rte_write_req *)__skb_put(skb, sizeof(*req));
|
||||
memset(req, 0, sizeof(*req));
|
||||
req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
|
||||
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
|
||||
req->l2t_idx = htonl(V_L2T_W_IDX(i));
|
||||
t3_mgmt_tx(adap, skb);
|
||||
if (skb == adap->nofail_skb) {
|
||||
await_mgmt_replies(adap, cnt, 16 + 2048 + i + 1);
|
||||
adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
|
||||
if (!adap->nofail_skb)
|
||||
goto alloc_skb_fail;
|
||||
}
|
||||
}
|
||||
|
||||
skb = alloc_skb(sizeof(*greq), GFP_KERNEL | __GFP_NOFAIL);
|
||||
skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
|
||||
if (!skb)
|
||||
skb = adap->nofail_skb;
|
||||
if (!skb)
|
||||
goto alloc_skb_fail;
|
||||
|
||||
greq = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*greq));
|
||||
memset(greq, 0, sizeof(*greq));
|
||||
greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
|
||||
|
@ -473,8 +513,17 @@ static int init_tp_parity(struct adapter *adap)
|
|||
t3_mgmt_tx(adap, skb);
|
||||
|
||||
i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
|
||||
if (skb == adap->nofail_skb) {
|
||||
i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
|
||||
adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
|
||||
}
|
||||
|
||||
t3_tp_set_offload_mode(adap, 0);
|
||||
return i;
|
||||
|
||||
alloc_skb_fail:
|
||||
t3_tp_set_offload_mode(adap, 0);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -869,7 +918,12 @@ static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
|
|||
struct mngt_pktsched_wr *req;
|
||||
int ret;
|
||||
|
||||
skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
|
||||
skb = alloc_skb(sizeof(*req), GFP_KERNEL);
|
||||
if (!skb)
|
||||
skb = adap->nofail_skb;
|
||||
if (!skb)
|
||||
return -ENOMEM;
|
||||
|
||||
req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
|
||||
req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
|
||||
req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
|
||||
|
@ -879,6 +933,12 @@ static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
|
|||
req->max = hi;
|
||||
req->binding = port;
|
||||
ret = t3_mgmt_tx(adap, skb);
|
||||
if (skb == adap->nofail_skb) {
|
||||
adap->nofail_skb = alloc_skb(sizeof(struct cpl_set_tcb_field),
|
||||
GFP_KERNEL);
|
||||
if (!adap->nofail_skb)
|
||||
ret = -ENOMEM;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
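/*
 * Userspace sketch (not driver code) of the "nofail buffer" pattern these
 * hunks introduce in place of GFP_KERNEL | __GFP_NOFAIL: try a normal
 * allocation first, fall back to a buffer set aside at init time, and
 * replenish that reserve once it has been handed off.  emergency_buf,
 * send_stub() and the sizes are illustrative names, not driver API.
 */
#include <stdio.h>
#include <stdlib.h>

static void *emergency_buf;		/* set aside once at init time */

static void send_stub(void *buf)	/* consumes the buffer, like t3_mgmt_tx() */
{
	free(buf);
}

static int send_with_fallback(size_t len)
{
	void *buf = malloc(len);

	if (!buf)
		buf = emergency_buf;
	if (!buf)
		return -1;		/* -ENOMEM in the driver */

	send_stub(buf);

	if (buf == emergency_buf) {	/* reserve used up: try to refill it */
		emergency_buf = malloc(len);
		if (!emergency_buf)
			return -1;
	}
	return 0;
}

int main(void)
{
	emergency_buf = malloc(64);
	if (!emergency_buf)
		return 1;
	printf("send: %s\n", send_with_fallback(64) ? "failed" : "ok");
	free(emergency_buf);
	return 0;
}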
|
||||
|
@ -2451,14 +2511,16 @@ static void check_link_status(struct adapter *adapter)
|
|||
for_each_port(adapter, i) {
|
||||
struct net_device *dev = adapter->port[i];
|
||||
struct port_info *p = netdev_priv(dev);
|
||||
int link_fault;
|
||||
|
||||
spin_lock_irq(&adapter->work_lock);
|
||||
if (p->link_fault) {
|
||||
link_fault = p->link_fault;
|
||||
spin_unlock_irq(&adapter->work_lock);
|
||||
|
||||
if (link_fault) {
|
||||
t3_link_fault(adapter, i);
|
||||
spin_unlock_irq(&adapter->work_lock);
|
||||
continue;
|
||||
}
|
||||
spin_unlock_irq(&adapter->work_lock);
|
||||
|
||||
if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev)) {
|
||||
t3_xgm_intr_disable(adapter, i);
|
||||
|
@ -3016,6 +3078,14 @@ static int __devinit init_one(struct pci_dev *pdev,
|
|||
goto out_disable_device;
|
||||
}
|
||||
|
||||
adapter->nofail_skb =
|
||||
alloc_skb(sizeof(struct cpl_set_tcb_field), GFP_KERNEL);
|
||||
if (!adapter->nofail_skb) {
|
||||
dev_err(&pdev->dev, "cannot allocate nofail buffer\n");
|
||||
err = -ENOMEM;
|
||||
goto out_free_adapter;
|
||||
}
|
||||
|
||||
adapter->regs = ioremap_nocache(mmio_start, mmio_len);
|
||||
if (!adapter->regs) {
|
||||
dev_err(&pdev->dev, "cannot map device registers\n");
|
||||
|
@ -3059,7 +3129,6 @@ static int __devinit init_one(struct pci_dev *pdev,
|
|||
netdev->mem_start = mmio_start;
|
||||
netdev->mem_end = mmio_start + mmio_len - 1;
|
||||
netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
|
||||
netdev->features |= NETIF_F_LLTX;
|
||||
netdev->features |= NETIF_F_GRO;
|
||||
if (pci_using_dac)
|
||||
netdev->features |= NETIF_F_HIGHDMA;
|
||||
|
@ -3173,6 +3242,8 @@ static void __devexit remove_one(struct pci_dev *pdev)
|
|||
free_netdev(adapter->port[i]);
|
||||
|
||||
iounmap(adapter->regs);
|
||||
if (adapter->nofail_skb)
|
||||
kfree_skb(adapter->nofail_skb);
|
||||
kfree(adapter);
|
||||
pci_release_regions(pdev);
|
||||
pci_disable_device(pdev);
|
||||
|
|
|
@ -566,13 +566,31 @@ static void t3_process_tid_release_list(struct work_struct *work)
|
|||
spin_unlock_bh(&td->tid_release_lock);
|
||||
|
||||
skb = alloc_skb(sizeof(struct cpl_tid_release),
|
||||
GFP_KERNEL | __GFP_NOFAIL);
|
||||
GFP_KERNEL);
|
||||
if (!skb)
|
||||
skb = td->nofail_skb;
|
||||
if (!skb) {
|
||||
spin_lock_bh(&td->tid_release_lock);
|
||||
p->ctx = (void *)td->tid_release_list;
|
||||
td->tid_release_list = (struct t3c_tid_entry *)p;
|
||||
break;
|
||||
}
|
||||
mk_tid_release(skb, p - td->tid_maps.tid_tab);
|
||||
cxgb3_ofld_send(tdev, skb);
|
||||
p->ctx = NULL;
|
||||
if (skb == td->nofail_skb)
|
||||
td->nofail_skb =
|
||||
alloc_skb(sizeof(struct cpl_tid_release),
|
||||
GFP_KERNEL);
|
||||
spin_lock_bh(&td->tid_release_lock);
|
||||
}
|
||||
td->release_list_incomplete = (td->tid_release_list == NULL) ? 0 : 1;
|
||||
spin_unlock_bh(&td->tid_release_lock);
|
||||
|
||||
if (!td->nofail_skb)
|
||||
td->nofail_skb =
|
||||
alloc_skb(sizeof(struct cpl_tid_release),
|
||||
GFP_KERNEL);
|
||||
}
|
||||
|
||||
/* use ctx as a next pointer in the tid release list */
|
||||
|
@ -585,7 +603,7 @@ void cxgb3_queue_tid_release(struct t3cdev *tdev, unsigned int tid)
|
|||
p->ctx = (void *)td->tid_release_list;
|
||||
p->client = NULL;
|
||||
td->tid_release_list = p;
|
||||
if (!p->ctx)
|
||||
if (!p->ctx || td->release_list_incomplete)
|
||||
schedule_work(&td->tid_release_task);
|
||||
spin_unlock_bh(&td->tid_release_lock);
|
||||
}
|
||||
|
@ -1274,6 +1292,9 @@ int cxgb3_offload_activate(struct adapter *adapter)
|
|||
if (list_empty(&adapter_list))
|
||||
register_netevent_notifier(&nb);
|
||||
|
||||
t->nofail_skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_KERNEL);
|
||||
t->release_list_incomplete = 0;
|
||||
|
||||
add_adapter(adapter);
|
||||
return 0;
|
||||
|
||||
|
@ -1298,6 +1319,8 @@ void cxgb3_offload_deactivate(struct adapter *adapter)
|
|||
T3C_DATA(tdev) = NULL;
|
||||
t3_free_l2t(L2DATA(tdev));
|
||||
L2DATA(tdev) = NULL;
|
||||
if (t->nofail_skb)
|
||||
kfree_skb(t->nofail_skb);
|
||||
kfree(t);
|
||||
}
|
||||
|
||||
|
|
|
@ -191,6 +191,9 @@ struct t3c_data {
|
|||
struct t3c_tid_entry *tid_release_list;
|
||||
spinlock_t tid_release_lock;
|
||||
struct work_struct tid_release_task;
|
||||
|
||||
struct sk_buff *nofail_skb;
|
||||
unsigned int release_list_incomplete;
|
||||
};
|
||||
|
||||
/*
|
||||
|
|
|
@ -355,7 +355,7 @@ static void clear_rx_desc(struct pci_dev *pdev, const struct sge_fl *q,
|
|||
(*d->pg_chunk.p_cnt)--;
|
||||
if (!*d->pg_chunk.p_cnt)
|
||||
pci_unmap_page(pdev,
|
||||
pci_unmap_addr(&d->pg_chunk, mapping),
|
||||
d->pg_chunk.mapping,
|
||||
q->alloc_size, PCI_DMA_FROMDEVICE);
|
||||
|
||||
put_page(d->pg_chunk.page);
|
||||
|
@ -454,7 +454,7 @@ static int alloc_pg_chunk(struct adapter *adapter, struct sge_fl *q,
|
|||
q->pg_chunk.offset = 0;
|
||||
mapping = pci_map_page(adapter->pdev, q->pg_chunk.page,
|
||||
0, q->alloc_size, PCI_DMA_FROMDEVICE);
|
||||
pci_unmap_addr_set(&q->pg_chunk, mapping, mapping);
|
||||
q->pg_chunk.mapping = mapping;
|
||||
}
|
||||
sd->pg_chunk = q->pg_chunk;
|
||||
|
||||
|
@ -511,8 +511,7 @@ static int refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
|
|||
nomem: q->alloc_failed++;
|
||||
break;
|
||||
}
|
||||
mapping = pci_unmap_addr(&sd->pg_chunk, mapping) +
|
||||
sd->pg_chunk.offset;
|
||||
mapping = sd->pg_chunk.mapping + sd->pg_chunk.offset;
|
||||
pci_unmap_addr_set(sd, dma_addr, mapping);
|
||||
|
||||
add_one_rx_chunk(mapping, d, q->gen);
|
||||
|
@ -882,7 +881,7 @@ recycle:
|
|||
(*sd->pg_chunk.p_cnt)--;
|
||||
if (!*sd->pg_chunk.p_cnt)
|
||||
pci_unmap_page(adap->pdev,
|
||||
pci_unmap_addr(&sd->pg_chunk, mapping),
|
||||
sd->pg_chunk.mapping,
|
||||
fl->alloc_size,
|
||||
PCI_DMA_FROMDEVICE);
|
||||
if (!skb) {
|
||||
|
@ -1241,7 +1240,6 @@ int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
|
|||
q = &qs->txq[TXQ_ETH];
|
||||
txq = netdev_get_tx_queue(dev, qidx);
|
||||
|
||||
spin_lock(&q->lock);
|
||||
reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
|
||||
|
||||
credits = q->size - q->in_use;
|
||||
|
@ -1252,7 +1250,6 @@ int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
|
|||
dev_err(&adap->pdev->dev,
|
||||
"%s: Tx ring %u full while queue awake!\n",
|
||||
dev->name, q->cntxt_id & 7);
|
||||
spin_unlock(&q->lock);
|
||||
return NETDEV_TX_BUSY;
|
||||
}
|
||||
|
||||
|
@ -1286,9 +1283,6 @@ int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
|
|||
if (vlan_tx_tag_present(skb) && pi->vlan_grp)
|
||||
qs->port_stats[SGE_PSTAT_VLANINS]++;
|
||||
|
||||
dev->trans_start = jiffies;
|
||||
spin_unlock(&q->lock);
|
||||
|
||||
/*
|
||||
* We do not use Tx completion interrupts to free DMAd Tx packets.
|
||||
* This is good for performance but means that we rely on new Tx
|
||||
|
@ -2096,7 +2090,7 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
|
|||
(*sd->pg_chunk.p_cnt)--;
|
||||
if (!*sd->pg_chunk.p_cnt)
|
||||
pci_unmap_page(adap->pdev,
|
||||
pci_unmap_addr(&sd->pg_chunk, mapping),
|
||||
sd->pg_chunk.mapping,
|
||||
fl->alloc_size,
|
||||
PCI_DMA_FROMDEVICE);
|
||||
|
||||
|
@ -2858,11 +2852,12 @@ static void sge_timer_tx(unsigned long data)
|
|||
unsigned int tbd[SGE_TXQ_PER_SET] = {0, 0};
|
||||
unsigned long next_period;
|
||||
|
||||
if (spin_trylock(&qs->txq[TXQ_ETH].lock)) {
|
||||
tbd[TXQ_ETH] = reclaim_completed_tx(adap, &qs->txq[TXQ_ETH],
|
||||
TX_RECLAIM_TIMER_CHUNK);
|
||||
spin_unlock(&qs->txq[TXQ_ETH].lock);
|
||||
if (__netif_tx_trylock(qs->tx_q)) {
|
||||
tbd[TXQ_ETH] = reclaim_completed_tx(adap, &qs->txq[TXQ_ETH],
|
||||
TX_RECLAIM_TIMER_CHUNK);
|
||||
__netif_tx_unlock(qs->tx_q);
|
||||
}
|
||||
|
||||
if (spin_trylock(&qs->txq[TXQ_OFLD].lock)) {
|
||||
tbd[TXQ_OFLD] = reclaim_completed_tx(adap, &qs->txq[TXQ_OFLD],
|
||||
TX_RECLAIM_TIMER_CHUNK);
|
||||
|
@ -2870,8 +2865,8 @@ static void sge_timer_tx(unsigned long data)
|
|||
}
|
||||
|
||||
next_period = TX_RECLAIM_PERIOD >>
|
||||
(max(tbd[TXQ_ETH], tbd[TXQ_OFLD]) /
|
||||
TX_RECLAIM_TIMER_CHUNK);
|
||||
(max(tbd[TXQ_ETH], tbd[TXQ_OFLD]) /
|
||||
TX_RECLAIM_TIMER_CHUNK);
|
||||
mod_timer(&qs->tx_reclaim_timer, jiffies + next_period);
|
||||
}
|
||||
|
||||
|
|
|
@ -526,6 +526,11 @@ static const struct adapter_info t3_adap_info[] = {
|
|||
F_GPIO10_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
|
||||
{ S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
|
||||
&mi1_mdio_ext_ops, "Chelsio T310" },
|
||||
{1, 0, 0,
|
||||
F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN |
|
||||
F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL,
|
||||
{ S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
|
||||
&mi1_mdio_ext_ops, "Chelsio N320E-G2" },
|
||||
};
|
||||
|
||||
/*
|
||||
|
@ -552,6 +557,8 @@ static const struct port_type_info port_types[] = {
|
|||
{ t3_qt2045_phy_prep },
|
||||
{ t3_ael1006_phy_prep },
|
||||
{ NULL },
|
||||
{ t3_aq100x_phy_prep },
|
||||
{ t3_ael2020_phy_prep },
|
||||
};
|
||||
|
||||
#define VPD_ENTRY(name, len) \
|
||||
|
@ -1281,6 +1288,11 @@ void t3_link_fault(struct adapter *adapter, int port_id)
|
|||
A_XGM_INT_STATUS + mac->offset);
|
||||
link_fault &= F_LINKFAULTCHANGE;
|
||||
|
||||
link_ok = lc->link_ok;
|
||||
speed = lc->speed;
|
||||
duplex = lc->duplex;
|
||||
fc = lc->fc;
|
||||
|
||||
phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);
|
||||
|
||||
if (link_fault) {
|
||||
|
|
|
@ -35,10 +35,10 @@
|
|||
#define DRV_DESC "Chelsio T3 Network Driver"
|
||||
#define DRV_NAME "cxgb3"
|
||||
/* Driver version */
|
||||
#define DRV_VERSION "1.1.2-ko"
|
||||
#define DRV_VERSION "1.1.3-ko"
|
||||
|
||||
/* Firmware version */
|
||||
#define FW_VERSION_MAJOR 7
|
||||
#define FW_VERSION_MINOR 1
|
||||
#define FW_VERSION_MINOR 4
|
||||
#define FW_VERSION_MICRO 0
|
||||
#endif /* __CHELSIO_VERSION_H */
|
||||
|
|
|
@ -1819,7 +1819,6 @@ static int emac_dev_setmac_addr(struct net_device *ndev, void *addr)
|
|||
struct emac_rxch *rxch = priv->rxch[EMAC_DEF_RX_CH];
|
||||
struct device *emac_dev = &priv->ndev->dev;
|
||||
struct sockaddr *sa = addr;
|
||||
DECLARE_MAC_BUF(mac);
|
||||
|
||||
/* Store mac addr in priv and rx channel and set it in EMAC hw */
|
||||
memcpy(priv->mac_addr, sa->sa_data, ndev->addr_len);
|
||||
|
@ -1828,8 +1827,8 @@ static int emac_dev_setmac_addr(struct net_device *ndev, void *addr)
|
|||
emac_setmac(priv, EMAC_DEF_RX_CH, rxch->mac_addr);
|
||||
|
||||
if (netif_msg_drv(priv))
|
||||
dev_notice(emac_dev, "DaVinci EMAC: emac_dev_setmac_addr %s\n",
|
||||
print_mac(mac, priv->mac_addr));
|
||||
dev_notice(emac_dev, "DaVinci EMAC: emac_dev_setmac_addr %pM\n",
|
||||
priv->mac_addr);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -2683,11 +2682,10 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
|
|||
ndev->irq = res->start;
|
||||
|
||||
if (!is_valid_ether_addr(priv->mac_addr)) {
|
||||
DECLARE_MAC_BUF(buf);
|
||||
/* Use random MAC if none passed */
|
||||
random_ether_addr(priv->mac_addr);
|
||||
printk(KERN_WARNING "%s: using random MAC addr: %s\n",
|
||||
__func__, print_mac(buf, priv->mac_addr));
|
||||
printk(KERN_WARNING "%s: using random MAC addr: %pM\n",
|
||||
__func__, priv->mac_addr);
|
||||
}
|
||||
|
||||
ndev->netdev_ops = &emac_netdev_ops;
|
||||
|
|
|
@ -895,6 +895,7 @@ static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
|||
struct lance_private *lp = netdev_priv(dev);
|
||||
volatile struct lance_regs *ll = lp->ll;
|
||||
volatile u16 *ib = (volatile u16 *)dev->mem_start;
|
||||
unsigned long flags;
|
||||
int entry, len;
|
||||
|
||||
len = skb->len;
|
||||
|
@ -907,6 +908,8 @@ static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
|||
|
||||
dev->stats.tx_bytes += len;
|
||||
|
||||
spin_lock_irqsave(&lp->lock, flags);
|
||||
|
||||
entry = lp->tx_new;
|
||||
*lib_ptr(ib, btx_ring[entry].length, lp->type) = (-len);
|
||||
*lib_ptr(ib, btx_ring[entry].misc, lp->type) = 0;
|
||||
|
@ -925,6 +928,8 @@ static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
|||
/* Kick the lance: transmit now */
|
||||
writereg(&ll->rdp, LE_C0_INEA | LE_C0_TDMD);
|
||||
|
||||
spin_unlock_irqrestore(&lp->lock, flags);
|
||||
|
||||
dev->trans_start = jiffies;
|
||||
dev_kfree_skb(skb);
|
||||
|
||||
|
|
|
@ -539,7 +539,7 @@ rio_tx_timeout (struct net_device *dev)
|
|||
dev->name, readl (ioaddr + TxStatus));
|
||||
rio_free_tx(dev, 0);
|
||||
dev->if_port = 0;
|
||||
dev->trans_start = jiffies;
|
||||
dev->trans_start = jiffies; /* prevent tx timeout */
|
||||
}
|
||||
|
||||
/* allocate and initialize Tx and Rx descriptors */
|
||||
|
@ -610,7 +610,7 @@ start_xmit (struct sk_buff *skb, struct net_device *dev)
|
|||
|
||||
if (np->link_status == 0) { /* Link Down */
|
||||
dev_kfree_skb(skb);
|
||||
return 0;
|
||||
return NETDEV_TX_OK;
|
||||
}
|
||||
ioaddr = dev->base_addr;
|
||||
entry = np->cur_tx % TX_RING_SIZE;
|
||||
|
@ -665,9 +665,7 @@ start_xmit (struct sk_buff *skb, struct net_device *dev)
|
|||
writel (0, dev->base_addr + TFDListPtr1);
|
||||
}
|
||||
|
||||
/* NETDEV WATCHDOG timer */
|
||||
dev->trans_start = jiffies;
|
||||
return 0;
|
||||
return NETDEV_TX_OK;
|
||||
}
|
||||
|
||||
static irqreturn_t
|
||||
|
|
|
@ -143,6 +143,8 @@
|
|||
* FIXES:
|
||||
* 2005/12/02 - Michael O'Donnell <Michael.ODonnell at stratus dot com>
|
||||
* - Stratus87247: protect MDI control register manipulations
|
||||
* 2009/06/01 - Andreas Mohr <andi at lisas dot de>
|
||||
* - add clean lowlevel I/O emulation for cards with MII-lacking PHYs
|
||||
*/
|
||||
|
||||
#include <linux/module.h>
|
||||
|
@ -372,6 +374,7 @@ enum eeprom_op {
|
|||
|
||||
enum eeprom_offsets {
|
||||
eeprom_cnfg_mdix = 0x03,
|
||||
eeprom_phy_iface = 0x06,
|
||||
eeprom_id = 0x0A,
|
||||
eeprom_config_asf = 0x0D,
|
||||
eeprom_smbus_addr = 0x90,
|
||||
|
@ -381,6 +384,18 @@ enum eeprom_cnfg_mdix {
|
|||
eeprom_mdix_enabled = 0x0080,
|
||||
};
|
||||
|
||||
enum eeprom_phy_iface {
|
||||
NoSuchPhy = 0,
|
||||
I82553AB,
|
||||
I82553C,
|
||||
I82503,
|
||||
DP83840,
|
||||
S80C240,
|
||||
S80C24,
|
||||
I82555,
|
||||
DP83840A = 10,
|
||||
};
|
||||
|
||||
enum eeprom_id {
|
||||
eeprom_id_wol = 0x0020,
|
||||
};
|
||||
|
@ -545,6 +560,7 @@ struct nic {
|
|||
u32 msg_enable ____cacheline_aligned;
|
||||
struct net_device *netdev;
|
||||
struct pci_dev *pdev;
|
||||
u16 (*mdio_ctrl)(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data);
|
||||
|
||||
struct rx *rxs ____cacheline_aligned;
|
||||
struct rx *rx_to_use;
|
||||
|
@ -899,7 +915,21 @@ err_unlock:
|
|||
return err;
|
||||
}
|
||||
|
||||
static u16 mdio_ctrl(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data)
|
||||
static int mdio_read(struct net_device *netdev, int addr, int reg)
|
||||
{
|
||||
struct nic *nic = netdev_priv(netdev);
|
||||
return nic->mdio_ctrl(nic, addr, mdi_read, reg, 0);
|
||||
}
|
||||
|
||||
static void mdio_write(struct net_device *netdev, int addr, int reg, int data)
|
||||
{
|
||||
struct nic *nic = netdev_priv(netdev);
|
||||
|
||||
nic->mdio_ctrl(nic, addr, mdi_write, reg, data);
|
||||
}
|
||||
|
||||
/* the standard mdio_ctrl() function for usual MII-compliant hardware */
|
||||
static u16 mdio_ctrl_hw(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data)
|
||||
{
|
||||
u32 data_out = 0;
|
||||
unsigned int i;
|
||||
|
@ -938,30 +968,83 @@ static u16 mdio_ctrl(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data)
|
|||
return (u16)data_out;
|
||||
}
|
||||
|
||||
static int mdio_read(struct net_device *netdev, int addr, int reg)
|
||||
/* slightly tweaked mdio_ctrl() function for phy_82552_v specifics */
|
||||
static u16 mdio_ctrl_phy_82552_v(struct nic *nic,
|
||||
u32 addr,
|
||||
u32 dir,
|
||||
u32 reg,
|
||||
u16 data)
|
||||
{
|
||||
return mdio_ctrl(netdev_priv(netdev), addr, mdi_read, reg, 0);
|
||||
if ((reg == MII_BMCR) && (dir == mdi_write)) {
|
||||
if (data & (BMCR_ANRESTART | BMCR_ANENABLE)) {
|
||||
u16 advert = mdio_read(nic->netdev, nic->mii.phy_id,
|
||||
MII_ADVERTISE);
|
||||
|
||||
/*
|
||||
* Workaround Si issue where sometimes the part will not
|
||||
* autoneg to 100Mbps even when advertised.
|
||||
*/
|
||||
if (advert & ADVERTISE_100FULL)
|
||||
data |= BMCR_SPEED100 | BMCR_FULLDPLX;
|
||||
else if (advert & ADVERTISE_100HALF)
|
||||
data |= BMCR_SPEED100;
|
||||
}
|
||||
}
|
||||
return mdio_ctrl_hw(nic, addr, dir, reg, data);
|
||||
}
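/*
 * Standalone sketch of the 82552 workaround applied in
 * mdio_ctrl_phy_82552_v() above: when a BMCR write (re)starts
 * autonegotiation, pre-set the 100 Mb/s and duplex bits according to what is
 * currently advertised, since the part may otherwise fail to negotiate
 * 100 Mb/s.  The constants mirror include/uapi/linux/mii.h; the function
 * name is illustrative, not the driver's.
 */
#include <stdio.h>
#include <stdint.h>

#define BMCR_FULLDPLX		0x0100
#define BMCR_ANRESTART		0x0200
#define BMCR_ANENABLE		0x1000
#define BMCR_SPEED100		0x2000
#define ADVERTISE_100HALF	0x0080
#define ADVERTISE_100FULL	0x0100

/* Adjust a BMCR value about to be written, given the current advertisement. */
static uint16_t fixup_bmcr_for_82552(uint16_t bmcr, uint16_t advert)
{
	if (bmcr & (BMCR_ANRESTART | BMCR_ANENABLE)) {
		if (advert & ADVERTISE_100FULL)
			bmcr |= BMCR_SPEED100 | BMCR_FULLDPLX;
		else if (advert & ADVERTISE_100HALF)
			bmcr |= BMCR_SPEED100;
	}
	return bmcr;
}

int main(void)
{
	uint16_t bmcr = BMCR_ANENABLE | BMCR_ANRESTART;
	uint16_t advert = ADVERTISE_100FULL;

	printf("BMCR 0x%04x -> 0x%04x\n", bmcr, fixup_bmcr_for_82552(bmcr, advert));
	return 0;
}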
|
||||
|
||||
static void mdio_write(struct net_device *netdev, int addr, int reg, int data)
|
||||
/* Fully software-emulated mdio_ctrl() function for cards without
|
||||
* MII-compliant PHYs.
|
||||
* For now, this is mainly geared towards 80c24 support; in case of further
|
||||
* requirements for other types (i82503, ...?) either extend this mechanism
|
||||
* or split it, whichever is cleaner.
|
||||
*/
|
||||
static u16 mdio_ctrl_phy_mii_emulated(struct nic *nic,
|
||||
u32 addr,
|
||||
u32 dir,
|
||||
u32 reg,
|
||||
u16 data)
|
||||
{
|
||||
struct nic *nic = netdev_priv(netdev);
|
||||
/* might need to allocate a netdev_priv'ed register array eventually
|
||||
* to be able to record state changes, but for now
|
||||
* some fully hardcoded register handling ought to be ok I guess. */
|
||||
|
||||
if ((nic->phy == phy_82552_v) && (reg == MII_BMCR) &&
|
||||
(data & (BMCR_ANRESTART | BMCR_ANENABLE))) {
|
||||
u16 advert = mdio_read(netdev, nic->mii.phy_id, MII_ADVERTISE);
|
||||
|
||||
/*
|
||||
* Workaround Si issue where sometimes the part will not
|
||||
* autoneg to 100Mbps even when advertised.
|
||||
*/
|
||||
if (advert & ADVERTISE_100FULL)
|
||||
data |= BMCR_SPEED100 | BMCR_FULLDPLX;
|
||||
else if (advert & ADVERTISE_100HALF)
|
||||
data |= BMCR_SPEED100;
|
||||
if (dir == mdi_read) {
|
||||
switch (reg) {
|
||||
case MII_BMCR:
|
||||
/* Auto-negotiation, right? */
|
||||
return BMCR_ANENABLE |
|
||||
BMCR_FULLDPLX;
|
||||
case MII_BMSR:
|
||||
return BMSR_LSTATUS /* for mii_link_ok() */ |
|
||||
BMSR_ANEGCAPABLE |
|
||||
BMSR_10FULL;
|
||||
case MII_ADVERTISE:
|
||||
/* 80c24 is a "combo card" PHY, right? */
|
||||
return ADVERTISE_10HALF |
|
||||
ADVERTISE_10FULL;
|
||||
default:
|
||||
DPRINTK(HW, DEBUG,
|
||||
"%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n",
|
||||
dir == mdi_read ? "READ" : "WRITE", addr, reg, data);
|
||||
return 0xFFFF;
|
||||
}
|
||||
} else {
|
||||
switch (reg) {
|
||||
default:
|
||||
DPRINTK(HW, DEBUG,
|
||||
"%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n",
|
||||
dir == mdi_read ? "READ" : "WRITE", addr, reg, data);
|
||||
return 0xFFFF;
|
||||
}
|
||||
}
|
||||
|
||||
mdio_ctrl(netdev_priv(netdev), addr, mdi_write, reg, data);
|
||||
}
|
||||
static inline int e100_phy_supports_mii(struct nic *nic)
|
||||
{
|
||||
/* for now, just check it by comparing whether we
|
||||
are using MII software emulation.
|
||||
*/
|
||||
return (nic->mdio_ctrl != mdio_ctrl_phy_mii_emulated);
|
||||
}
|
||||
|
||||
static void e100_get_defaults(struct nic *nic)
|
||||
|
@ -1013,7 +1096,8 @@ static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
|
|||
config->standard_stat_counter = 0x1; /* 1=standard, 0=extended */
|
||||
config->rx_discard_short_frames = 0x1; /* 1=discard, 0=pass */
|
||||
config->tx_underrun_retry = 0x3; /* # of underrun retries */
|
||||
config->mii_mode = 0x1; /* 1=MII mode, 0=503 mode */
|
||||
if (e100_phy_supports_mii(nic))
|
||||
config->mii_mode = 1; /* 1=MII mode, 0=i82503 mode */
|
||||
config->pad10 = 0x6;
|
||||
config->no_source_addr_insertion = 0x1; /* 1=no, 0=yes */
|
||||
config->preamble_length = 0x2; /* 0=1, 1=3, 2=7, 3=15 bytes */
|
||||
|
@ -1270,6 +1354,42 @@ static void e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb)
|
|||
offsetof(struct mem, dump_buf));
|
||||
}
|
||||
|
||||
static int e100_phy_check_without_mii(struct nic *nic)
|
||||
{
|
||||
u8 phy_type;
|
||||
int without_mii;
|
||||
|
||||
phy_type = (nic->eeprom[eeprom_phy_iface] >> 8) & 0x0f;
|
||||
|
||||
switch (phy_type) {
|
||||
case NoSuchPhy: /* Non-MII PHY; UNTESTED! */
|
||||
case I82503: /* Non-MII PHY; UNTESTED! */
|
||||
case S80C24: /* Non-MII PHY; tested and working */
|
||||
/* paragraph from the FreeBSD driver, "FXP_PHY_80C24":
|
||||
* The Seeq 80c24 AutoDUPLEX(tm) Ethernet Interface Adapter
|
||||
* doesn't have a programming interface of any sort. The
|
||||
* media is sensed automatically based on how the link partner
|
||||
* is configured. This is, in essence, manual configuration.
|
||||
*/
|
||||
DPRINTK(PROBE, INFO,
|
||||
"found MII-less i82503 or 80c24 or other PHY\n");
|
||||
|
||||
nic->mdio_ctrl = mdio_ctrl_phy_mii_emulated;
|
||||
nic->mii.phy_id = 0; /* is this ok for an MII-less PHY? */
|
||||
|
||||
/* these might be needed for certain MII-less cards...
|
||||
* nic->flags |= ich;
|
||||
* nic->flags |= ich_10h_workaround; */
|
||||
|
||||
without_mii = 1;
|
||||
break;
|
||||
default:
|
||||
without_mii = 0;
|
||||
break;
|
||||
}
|
||||
return without_mii;
|
||||
}
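/*
 * Sketch of the EEPROM check done in e100_phy_check_without_mii() above: the
 * PHY interface type lives in the high byte of EEPROM word 0x06, and a few
 * of the encoded types (none/i82503/80c24) are known to lack an MII.  The
 * word value in main() is a made-up example, not real EEPROM contents.
 */
#include <stdio.h>
#include <stdint.h>

enum { PHY_NONE = 0, PHY_I82503 = 3, PHY_S80C24 = 6 };

static int phy_is_mii_less(uint16_t eeprom_phy_iface_word)
{
	uint8_t phy_type = (eeprom_phy_iface_word >> 8) & 0x0f;

	return phy_type == PHY_NONE || phy_type == PHY_I82503 ||
	       phy_type == PHY_S80C24;
}

int main(void)
{
	uint16_t word = 0x0600;		/* example: type 6 == Seeq 80c24 */

	printf("MII-less: %d\n", phy_is_mii_less(word));
	return 0;
}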
|
||||
|
||||
#define NCONFIG_AUTO_SWITCH 0x0080
|
||||
#define MII_NSC_CONG MII_RESV1
|
||||
#define NSC_CONG_ENABLE 0x0100
|
||||
|
@ -1290,9 +1410,21 @@ static int e100_phy_init(struct nic *nic)
|
|||
if (!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0))))
|
||||
break;
|
||||
}
|
||||
DPRINTK(HW, DEBUG, "phy_addr = %d\n", nic->mii.phy_id);
|
||||
if (addr == 32)
|
||||
return -EAGAIN;
|
||||
if (addr == 32) {
|
||||
/* uhoh, no PHY detected: check whether we seem to be some
|
||||
* weird, rare variant which is *known* to not have any MII.
|
||||
* But do this AFTER MII checking only, since this does
|
||||
* lookup of EEPROM values which may easily be unreliable. */
|
||||
if (e100_phy_check_without_mii(nic))
|
||||
return 0; /* simply return and hope for the best */
|
||||
else {
|
||||
/* for unknown cases log a fatal error */
|
||||
DPRINTK(HW, ERR,
|
||||
"Failed to locate any known PHY, aborting.\n");
|
||||
return -EAGAIN;
|
||||
}
|
||||
} else
|
||||
DPRINTK(HW, DEBUG, "phy_addr = %d\n", nic->mii.phy_id);
|
||||
|
||||
/* Isolate all the PHY ids */
|
||||
for (addr = 0; addr < 32; addr++)
|
||||
|
@ -1320,6 +1452,9 @@ static int e100_phy_init(struct nic *nic)
|
|||
if (nic->phy == phy_82552_v) {
|
||||
u16 advert = mdio_read(netdev, nic->mii.phy_id, MII_ADVERTISE);
|
||||
|
||||
/* assign special tweaked mdio_ctrl() function */
|
||||
nic->mdio_ctrl = mdio_ctrl_phy_82552_v;
|
||||
|
||||
/* Workaround Si not advertising flow-control during autoneg */
|
||||
advert |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
|
||||
mdio_write(netdev, nic->mii.phy_id, MII_ADVERTISE, advert);
|
||||
|
@ -2585,6 +2720,7 @@ static int __devinit e100_probe(struct pci_dev *pdev,
|
|||
nic->netdev = netdev;
|
||||
nic->pdev = pdev;
|
||||
nic->msg_enable = (1 << debug) - 1;
|
||||
nic->mdio_ctrl = mdio_ctrl_hw;
|
||||
pci_set_drvdata(pdev, netdev);
|
||||
|
||||
if ((err = pci_enable_device(pdev))) {
|
||||
|
@ -2822,12 +2958,13 @@ static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, pci_channel
|
|||
struct net_device *netdev = pci_get_drvdata(pdev);
|
||||
struct nic *nic = netdev_priv(netdev);
|
||||
|
||||
/* Similar to calling e100_down(), but avoids adapter I/O. */
|
||||
e100_close(netdev);
|
||||
|
||||
/* Detach; put netif into a state similar to hotplug unplug. */
|
||||
napi_enable(&nic->napi);
|
||||
netif_device_detach(netdev);
|
||||
|
||||
if (state == pci_channel_io_perm_failure)
|
||||
return PCI_ERS_RESULT_DISCONNECT;
|
||||
|
||||
if (netif_running(netdev))
|
||||
e100_down(nic);
|
||||
pci_disable_device(pdev);
|
||||
|
||||
/* Request a slot reset. */
|
||||
|
|
|
@ -2330,7 +2330,8 @@ static void e1000_set_rx_mode(struct net_device *netdev)
|
|||
{
|
||||
struct e1000_adapter *adapter = netdev_priv(netdev);
|
||||
struct e1000_hw *hw = &adapter->hw;
|
||||
struct dev_addr_list *uc_ptr;
|
||||
struct netdev_hw_addr *ha;
|
||||
bool use_uc = false;
|
||||
struct dev_addr_list *mc_ptr;
|
||||
u32 rctl;
|
||||
u32 hash_value;
|
||||
|
@ -2369,12 +2370,11 @@ static void e1000_set_rx_mode(struct net_device *netdev)
|
|||
rctl |= E1000_RCTL_VFE;
|
||||
}
|
||||
|
||||
uc_ptr = NULL;
|
||||
if (netdev->uc_count > rar_entries - 1) {
|
||||
rctl |= E1000_RCTL_UPE;
|
||||
} else if (!(netdev->flags & IFF_PROMISC)) {
|
||||
rctl &= ~E1000_RCTL_UPE;
|
||||
uc_ptr = netdev->uc_list;
|
||||
use_uc = true;
|
||||
}
|
||||
|
||||
ew32(RCTL, rctl);
|
||||
|
@ -2392,13 +2392,20 @@ static void e1000_set_rx_mode(struct net_device *netdev)
|
|||
* if there are not 14 addresses, go ahead and clear the filters
|
||||
* -- with 82571 controllers only 0-13 entries are filled here
|
||||
*/
|
||||
i = 1;
|
||||
if (use_uc)
|
||||
list_for_each_entry(ha, &netdev->uc_list, list) {
|
||||
if (i == rar_entries)
|
||||
break;
|
||||
e1000_rar_set(hw, ha->addr, i++);
|
||||
}
|
||||
|
||||
WARN_ON(i == rar_entries);
|
||||
|
||||
mc_ptr = netdev->mc_list;
|
||||
|
||||
for (i = 1; i < rar_entries; i++) {
|
||||
if (uc_ptr) {
|
||||
e1000_rar_set(hw, uc_ptr->da_addr, i);
|
||||
uc_ptr = uc_ptr->next;
|
||||
} else if (mc_ptr) {
|
||||
for (; i < rar_entries; i++) {
|
||||
if (mc_ptr) {
|
||||
e1000_rar_set(hw, mc_ptr->da_addr, i);
|
||||
mc_ptr = mc_ptr->next;
|
||||
} else {
|
||||
|
@ -2408,7 +2415,6 @@ static void e1000_set_rx_mode(struct net_device *netdev)
|
|||
E1000_WRITE_FLUSH();
|
||||
}
|
||||
}
|
||||
WARN_ON(uc_ptr != NULL);
|
||||
|
||||
/* load any remaining addresses into the hash table */
|
||||
|
||||
|
@ -2992,7 +2998,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
|
|||
size -= 4;
|
||||
|
||||
buffer_info->length = size;
|
||||
buffer_info->dma = map[0] + offset;
|
||||
buffer_info->dma = skb_shinfo(skb)->dma_head + offset;
|
||||
buffer_info->time_stamp = jiffies;
|
||||
buffer_info->next_to_watch = i;
|
||||
|
||||
|
@ -3033,7 +3039,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
|
|||
size -= 4;
|
||||
|
||||
buffer_info->length = size;
|
||||
buffer_info->dma = map[f + 1] + offset;
|
||||
buffer_info->dma = map[f] + offset;
|
||||
buffer_info->time_stamp = jiffies;
|
||||
buffer_info->next_to_watch = i;
|
||||
|
||||
|
@ -3365,7 +3371,6 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
|
|||
|
||||
if (count) {
|
||||
e1000_tx_queue(adapter, tx_ring, tx_flags, count);
|
||||
netdev->trans_start = jiffies;
|
||||
/* Make sure there is space in the ring for the next send. */
|
||||
e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2);
|
||||
|
||||
|
@ -4030,8 +4035,9 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
|
|||
PCI_DMA_FROMDEVICE);
|
||||
|
||||
length = le16_to_cpu(rx_desc->length);
|
||||
|
||||
if (unlikely(!(status & E1000_RXD_STAT_EOP))) {
|
||||
/* !EOP means multiple descriptors were used to store a single
|
||||
* packet, also make sure the frame isn't just CRC only */
|
||||
if (unlikely(!(status & E1000_RXD_STAT_EOP) || (length <= 4))) {
|
||||
/* All receives must fit into a single buffer */
|
||||
E1000_DBG("%s: Receive packet consumed multiple"
|
||||
" buffers\n", netdev->name);
|
||||
|
|
|
@ -71,6 +71,7 @@ static s32 e1000_setup_link_82571(struct e1000_hw *hw);
|
|||
static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw);
|
||||
static bool e1000_check_mng_mode_82574(struct e1000_hw *hw);
|
||||
static s32 e1000_led_on_82574(struct e1000_hw *hw);
|
||||
static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw);
|
||||
|
||||
/**
|
||||
* e1000_init_phy_params_82571 - Init PHY func ptrs.
|
||||
|
@ -212,6 +213,9 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
|
|||
struct e1000_hw *hw = &adapter->hw;
|
||||
struct e1000_mac_info *mac = &hw->mac;
|
||||
struct e1000_mac_operations *func = &mac->ops;
|
||||
u32 swsm = 0;
|
||||
u32 swsm2 = 0;
|
||||
bool force_clear_smbi = false;
|
||||
|
||||
/* Set media type */
|
||||
switch (adapter->pdev->device) {
|
||||
|
@ -276,6 +280,50 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
|
|||
break;
|
||||
}
|
||||
|
||||
/*
|
||||
* Ensure that the inter-port SWSM.SMBI lock bit is clear before
|
||||
* first NVM or PHY access. This should be done for single-port
|
||||
* devices, and for one port only on dual-port devices so that
|
||||
* for those devices we can still use the SMBI lock to synchronize
|
||||
* inter-port accesses to the PHY & NVM.
|
||||
*/
|
||||
switch (hw->mac.type) {
|
||||
case e1000_82571:
|
||||
case e1000_82572:
|
||||
swsm2 = er32(SWSM2);
|
||||
|
||||
if (!(swsm2 & E1000_SWSM2_LOCK)) {
|
||||
/* Only do this for the first interface on this card */
|
||||
ew32(SWSM2,
|
||||
swsm2 | E1000_SWSM2_LOCK);
|
||||
force_clear_smbi = true;
|
||||
} else
|
||||
force_clear_smbi = false;
|
||||
break;
|
||||
default:
|
||||
force_clear_smbi = true;
|
||||
break;
|
||||
}
|
||||
|
||||
if (force_clear_smbi) {
|
||||
/* Make sure SWSM.SMBI is clear */
|
||||
swsm = er32(SWSM);
|
||||
if (swsm & E1000_SWSM_SMBI) {
|
||||
/* This bit should not be set on a first interface, and
|
||||
* indicates that the bootagent or EFI code has
|
||||
* improperly left this bit enabled
|
||||
*/
|
||||
hw_dbg(hw, "Please update your 82571 Bootagent\n");
|
||||
}
|
||||
ew32(SWSM, swsm & ~E1000_SWSM_SMBI);
|
||||
}
|
||||
|
||||
/*
 * Initialize device specific counter of SMBI acquisition
 * timeouts.
 */
hw->dev_spec.e82571.smb_counter = 0;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -341,8 +389,10 @@ static s32 e1000_get_variants_82571(struct e1000_adapter *adapter)
|
|||
if (e1000_read_nvm(&adapter->hw, NVM_INIT_3GIO_3, 1,
|
||||
&eeprom_data) < 0)
|
||||
break;
|
||||
if (eeprom_data & NVM_WORD1A_ASPM_MASK)
|
||||
adapter->flags &= ~FLAG_HAS_JUMBO_FRAMES;
|
||||
if (!(eeprom_data & NVM_WORD1A_ASPM_MASK)) {
|
||||
adapter->flags |= FLAG_HAS_JUMBO_FRAMES;
|
||||
adapter->max_hw_frame_size = DEFAULT_JUMBO;
|
||||
}
|
||||
}
|
||||
break;
|
||||
default:
|
||||
|
@ -411,11 +461,37 @@ static s32 e1000_get_phy_id_82571(struct e1000_hw *hw)
|
|||
static s32 e1000_get_hw_semaphore_82571(struct e1000_hw *hw)
|
||||
{
|
||||
u32 swsm;
|
||||
s32 timeout = hw->nvm.word_size + 1;
|
||||
s32 sw_timeout = hw->nvm.word_size + 1;
|
||||
s32 fw_timeout = hw->nvm.word_size + 1;
|
||||
s32 i = 0;
|
||||
|
||||
/*
 * If we have timed out 3 times on trying to acquire
 * the inter-port SMBI semaphore, there is old code
 * operating on the other port, and it is not
 * releasing SMBI. Modify the number of times that
 * we try for the semaphore to interwork with this
 * older code.
 */
if (hw->dev_spec.e82571.smb_counter > 2)
	sw_timeout = 1;
|
||||
|
||||
/* Get the SW semaphore */
|
||||
while (i < sw_timeout) {
|
||||
swsm = er32(SWSM);
|
||||
if (!(swsm & E1000_SWSM_SMBI))
|
||||
break;
|
||||
|
||||
udelay(50);
|
||||
i++;
|
||||
}
|
||||
|
||||
if (i == sw_timeout) {
|
||||
hw_dbg(hw, "Driver can't access device - SMBI bit is set.\n");
|
||||
hw->dev_spec.e82571.smb_counter++;
|
||||
}
|
||||
/* Get the FW semaphore. */
|
||||
for (i = 0; i < timeout; i++) {
|
||||
for (i = 0; i < fw_timeout; i++) {
|
||||
swsm = er32(SWSM);
|
||||
ew32(SWSM, swsm | E1000_SWSM_SWESMBI);
|
||||
|
||||
|
@ -426,9 +502,9 @@ static s32 e1000_get_hw_semaphore_82571(struct e1000_hw *hw)
|
|||
udelay(50);
|
||||
}
|
||||
|
||||
if (i == timeout) {
|
||||
if (i == fw_timeout) {
|
||||
/* Release semaphores */
|
||||
e1000e_put_hw_semaphore(hw);
|
||||
e1000_put_hw_semaphore_82571(hw);
|
||||
hw_dbg(hw, "Driver can't access the NVM\n");
|
||||
return -E1000_ERR_NVM;
|
||||
}
|
||||
|
@ -447,9 +523,7 @@ static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw)
|
|||
u32 swsm;
|
||||
|
||||
swsm = er32(SWSM);
|
||||
|
||||
swsm &= ~E1000_SWSM_SWESMBI;
|
||||
|
||||
swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
|
||||
ew32(SWSM, swsm);
|
||||
}
|
||||
|
||||
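As a reading aid for the two semaphore helpers above (an editor's sketch, not part of the diff): the acquire path first polls SMBI, then sets and verifies SWESMBI, and only reports failure after releasing what it took; the release path now clears both bits. A hypothetical caller, assumed to live in the same file as the static helpers and with the NVM access itself elided, would pair them like this:

/* Hypothetical caller, for illustration only */
static s32 example_nvm_op_82571(struct e1000_hw *hw)
{
	s32 ret_val;

	ret_val = e1000_get_hw_semaphore_82571(hw);
	if (ret_val)
		return ret_val;	/* SMBI/SWESMBI already released on failure */

	/* ... access the NVM or PHY shared with the other port here ... */

	e1000_put_hw_semaphore_82571(hw);	/* clears SWESMBI and SMBI */
	return 0;
}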
|
@ -1585,6 +1659,7 @@ static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw)
|
|||
static struct e1000_mac_operations e82571_mac_ops = {
|
||||
/* .check_mng_mode: mac type dependent */
|
||||
/* .check_for_link: media type dependent */
|
||||
.id_led_init = e1000e_id_led_init,
|
||||
.cleanup_led = e1000e_cleanup_led_generic,
|
||||
.clear_hw_cntrs = e1000_clear_hw_cntrs_82571,
|
||||
.get_bus_info = e1000e_get_bus_info_pcie,
|
||||
|
@ -1596,6 +1671,7 @@ static struct e1000_mac_operations e82571_mac_ops = {
|
|||
.init_hw = e1000_init_hw_82571,
|
||||
.setup_link = e1000_setup_link_82571,
|
||||
/* .setup_physical_interface: media type dependent */
|
||||
.setup_led = e1000e_setup_led_generic,
|
||||
};
|
||||
|
||||
static struct e1000_phy_operations e82_phy_ops_igp = {
|
||||
|
@ -1672,6 +1748,7 @@ struct e1000_info e1000_82571_info = {
|
|||
| FLAG_TARC_SPEED_MODE_BIT /* errata */
|
||||
| FLAG_APME_CHECK_PORT_B,
|
||||
.pba = 38,
|
||||
.max_hw_frame_size = DEFAULT_JUMBO,
|
||||
.get_variants = e1000_get_variants_82571,
|
||||
.mac_ops = &e82571_mac_ops,
|
||||
.phy_ops = &e82_phy_ops_igp,
|
||||
|
@ -1688,6 +1765,7 @@ struct e1000_info e1000_82572_info = {
|
|||
| FLAG_HAS_CTRLEXT_ON_LOAD
|
||||
| FLAG_TARC_SPEED_MODE_BIT, /* errata */
|
||||
.pba = 38,
|
||||
.max_hw_frame_size = DEFAULT_JUMBO,
|
||||
.get_variants = e1000_get_variants_82571,
|
||||
.mac_ops = &e82571_mac_ops,
|
||||
.phy_ops = &e82_phy_ops_igp,
|
||||
|
@ -1706,6 +1784,7 @@ struct e1000_info e1000_82573_info = {
|
|||
| FLAG_HAS_ERT
|
||||
| FLAG_HAS_SWSM_ON_LOAD,
|
||||
.pba = 20,
|
||||
.max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN,
|
||||
.get_variants = e1000_get_variants_82571,
|
||||
.mac_ops = &e82571_mac_ops,
|
||||
.phy_ops = &e82_phy_ops_m88,
|
||||
|
@ -1724,6 +1803,7 @@ struct e1000_info e1000_82574_info = {
|
|||
| FLAG_HAS_AMT
|
||||
| FLAG_HAS_CTRLEXT_ON_LOAD,
|
||||
.pba = 20,
|
||||
.max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN,
|
||||
.get_variants = e1000_get_variants_82571,
|
||||
.mac_ops = &e82571_mac_ops,
|
||||
.phy_ops = &e82_phy_ops_bm,
|
||||
|
@ -1740,6 +1820,7 @@ struct e1000_info e1000_82583_info = {
|
|||
| FLAG_HAS_AMT
|
||||
| FLAG_HAS_CTRLEXT_ON_LOAD,
|
||||
.pba = 20,
|
||||
.max_hw_frame_size = DEFAULT_JUMBO,
|
||||
.get_variants = e1000_get_variants_82571,
|
||||
.mac_ops = &e82571_mac_ops,
|
||||
.phy_ops = &e82_phy_ops_bm,
|
||||
|
|
|
@ -56,6 +56,7 @@
|
|||
/* Wake Up Control */
|
||||
#define E1000_WUC_APME 0x00000001 /* APM Enable */
|
||||
#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */
|
||||
#define E1000_WUC_PHY_WAKE 0x00000100 /* if PHY supports wakeup */
|
||||
|
||||
/* Wake Up Filter Control */
|
||||
#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
|
||||
|
@ -65,6 +66,13 @@
|
|||
#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */
|
||||
#define E1000_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */
|
||||
|
||||
/* Wake Up Status */
|
||||
#define E1000_WUS_LNKC E1000_WUFC_LNKC
|
||||
#define E1000_WUS_MAG E1000_WUFC_MAG
|
||||
#define E1000_WUS_EX E1000_WUFC_EX
|
||||
#define E1000_WUS_MC E1000_WUFC_MC
|
||||
#define E1000_WUS_BC E1000_WUFC_BC
|
||||
|
||||
/* Extended Device Control */
|
||||
#define E1000_CTRL_EXT_SDP7_DATA 0x00000080 /* Value of SW Definable Pin 7 */
|
||||
#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */
|
||||
|
@ -77,6 +85,7 @@
|
|||
#define E1000_CTRL_EXT_IAME 0x08000000 /* Interrupt acknowledge Auto-mask */
|
||||
#define E1000_CTRL_EXT_INT_TIMER_CLR 0x20000000 /* Clear Interrupt timers after IMS clear */
|
||||
#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */
|
||||
#define E1000_CTRL_EXT_PHYPDEN 0x00100000
|
||||
|
||||
/* Receive Descriptor bit definitions */
|
||||
#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */
|
||||
|
@ -140,6 +149,7 @@
|
|||
#define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */
|
||||
#define E1000_RCTL_RDMTS_HALF 0x00000000 /* Rx desc min threshold size */
|
||||
#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */
|
||||
#define E1000_RCTL_MO_3 0x00003000 /* multicast offset 15:4 */
|
||||
#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */
|
||||
/* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */
|
||||
#define E1000_RCTL_SZ_2048 0x00000000 /* Rx buffer size 2048 */
|
||||
|
@ -153,6 +163,7 @@
|
|||
#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */
|
||||
#define E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */
|
||||
#define E1000_RCTL_CFI 0x00100000 /* canonical form indicator */
|
||||
#define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */
|
||||
#define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */
|
||||
#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */
|
||||
|
||||
|
@ -255,11 +266,16 @@
|
|||
#define AUTONEG_ADVERTISE_SPEED_DEFAULT E1000_ALL_SPEED_DUPLEX
|
||||
|
||||
/* LED Control */
|
||||
#define E1000_PHY_LED0_MODE_MASK 0x00000007
|
||||
#define E1000_PHY_LED0_IVRT 0x00000008
|
||||
#define E1000_PHY_LED0_MASK 0x0000001F
|
||||
|
||||
#define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F
|
||||
#define E1000_LEDCTL_LED0_MODE_SHIFT 0
|
||||
#define E1000_LEDCTL_LED0_IVRT 0x00000040
|
||||
#define E1000_LEDCTL_LED0_BLINK 0x00000080
|
||||
|
||||
#define E1000_LEDCTL_MODE_LINK_UP 0x2
|
||||
#define E1000_LEDCTL_MODE_LED_ON 0xE
|
||||
#define E1000_LEDCTL_MODE_LED_OFF 0xF
|
||||
|
||||
|
@ -360,6 +376,8 @@
|
|||
#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */
|
||||
#define E1000_SWSM_DRV_LOAD 0x00000008 /* Driver Loaded Bit */
|
||||
|
||||
#define E1000_SWSM2_LOCK 0x00000002 /* Secondary driver semaphore bit */
|
||||
|
||||
/* Interrupt Cause Read */
|
||||
#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */
|
||||
#define E1000_ICR_LSC 0x00000004 /* Link Status Change */
|
||||
|
@ -469,6 +487,8 @@
|
|||
#define AUTO_READ_DONE_TIMEOUT 10
|
||||
|
||||
/* Flow Control */
|
||||
#define E1000_FCRTH_RTH 0x0000FFF8 /* Mask Bits[15:3] for RTH */
|
||||
#define E1000_FCRTL_RTL 0x0000FFF8 /* Mask Bits[15:3] for RTL */
|
||||
#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */
|
||||
|
||||
/* Transmit Configuration Word */
|
||||
|
@ -674,6 +694,8 @@
|
|||
#define IFE_C_E_PHY_ID 0x02A80310
|
||||
#define BME1000_E_PHY_ID 0x01410CB0
|
||||
#define BME1000_E_PHY_ID_R2 0x01410CB1
|
||||
#define I82577_E_PHY_ID 0x01540050
|
||||
#define I82578_E_PHY_ID 0x004DD040
|
||||
|
||||
/* M88E1000 Specific Registers */
|
||||
#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */
|
||||
|
@ -727,6 +749,9 @@
|
|||
#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00
|
||||
#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800
|
||||
|
||||
#define I82578_EPSCR_DOWNSHIFT_ENABLE 0x0020
|
||||
#define I82578_EPSCR_DOWNSHIFT_COUNTER_MASK 0x001C
|
||||
|
||||
/* BME1000 PHY Specific Control Register */
|
||||
#define BME1000_PSCR_ENABLE_DOWNSHIFT 0x0800 /* 1 = enable downshift */
|
||||
|
||||
|
|
|
@ -96,6 +96,51 @@ struct e1000_info;
|
|||
/* Number of packet split data buffers (not including the header buffer) */
|
||||
#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1)
|
||||
|
||||
#define DEFAULT_JUMBO 9234
|
||||
|
||||
/* BM/HV Specific Registers */
|
||||
#define BM_PORT_CTRL_PAGE 769
|
||||
|
||||
#define PHY_UPPER_SHIFT 21
|
||||
#define BM_PHY_REG(page, reg) \
|
||||
(((reg) & MAX_PHY_REG_ADDRESS) |\
|
||||
(((page) & 0xFFFF) << PHY_PAGE_SHIFT) |\
|
||||
(((reg) & ~MAX_PHY_REG_ADDRESS) << (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT)))
|
||||
|
||||
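A quick check of what the BM_PHY_REG() packing above produces (an editor's sketch, not part of the diff). It assumes the usual e1000e values MAX_PHY_REG_ADDRESS = 0x1F, PHY_PAGE_SHIFT = 5 and BM_WUC_PAGE = 800, which are defined elsewhere in the driver; registers 0-31 fit in the low five bits, anything larger spills into the field above the page bits:

#include <stdio.h>

#define MAX_PHY_REG_ADDRESS	0x1F	/* assumed value */
#define PHY_PAGE_SHIFT		5	/* assumed value */
#define PHY_UPPER_SHIFT		21
#define BM_WUC_PAGE		800	/* assumed value */

#define BM_PHY_REG(page, reg) \
	(((reg) & MAX_PHY_REG_ADDRESS) |\
	 (((page) & 0xFFFF) << PHY_PAGE_SHIFT) |\
	 (((reg) & ~MAX_PHY_REG_ADDRESS) << (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT)))

int main(void)
{
	/* BM_RAR_L(0): register 16 fits in the low five bits */
	printf("BM_RAR_L(0) = 0x%x\n", BM_PHY_REG(BM_WUC_PAGE, 16 + (0 << 2)));	/* 0x6410 */
	/* BM_MTA(0): register 128 needs the upper register field */
	printf("BM_MTA(0)   = 0x%x\n", BM_PHY_REG(BM_WUC_PAGE, 128 + (0 << 1)));	/* 0x806400 */
	return 0;
}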
/* PHY Wakeup Registers and defines */
|
||||
#define BM_RCTL PHY_REG(BM_WUC_PAGE, 0)
|
||||
#define BM_WUC PHY_REG(BM_WUC_PAGE, 1)
|
||||
#define BM_WUFC PHY_REG(BM_WUC_PAGE, 2)
|
||||
#define BM_WUS PHY_REG(BM_WUC_PAGE, 3)
|
||||
#define BM_RAR_L(_i) (BM_PHY_REG(BM_WUC_PAGE, 16 + ((_i) << 2)))
|
||||
#define BM_RAR_M(_i) (BM_PHY_REG(BM_WUC_PAGE, 17 + ((_i) << 2)))
|
||||
#define BM_RAR_H(_i) (BM_PHY_REG(BM_WUC_PAGE, 18 + ((_i) << 2)))
|
||||
#define BM_RAR_CTRL(_i) (BM_PHY_REG(BM_WUC_PAGE, 19 + ((_i) << 2)))
|
||||
#define BM_MTA(_i) (BM_PHY_REG(BM_WUC_PAGE, 128 + ((_i) << 1)))
|
||||
|
||||
#define BM_RCTL_UPE 0x0001 /* Unicast Promiscuous Mode */
|
||||
#define BM_RCTL_MPE 0x0002 /* Multicast Promiscuous Mode */
|
||||
#define BM_RCTL_MO_SHIFT 3 /* Multicast Offset Shift */
|
||||
#define BM_RCTL_MO_MASK (3 << 3) /* Multicast Offset Mask */
|
||||
#define BM_RCTL_BAM 0x0020 /* Broadcast Accept Mode */
|
||||
#define BM_RCTL_PMCF 0x0040 /* Pass MAC Control Frames */
|
||||
#define BM_RCTL_RFCE 0x0080 /* Rx Flow Control Enable */
|
||||
|
||||
#define HV_SCC_UPPER PHY_REG(778, 16) /* Single Collision Count */
|
||||
#define HV_SCC_LOWER PHY_REG(778, 17)
|
||||
#define HV_ECOL_UPPER PHY_REG(778, 18) /* Excessive Collision Count */
|
||||
#define HV_ECOL_LOWER PHY_REG(778, 19)
|
||||
#define HV_MCC_UPPER PHY_REG(778, 20) /* Multiple Collision Count */
|
||||
#define HV_MCC_LOWER PHY_REG(778, 21)
|
||||
#define HV_LATECOL_UPPER PHY_REG(778, 23) /* Late Collision Count */
|
||||
#define HV_LATECOL_LOWER PHY_REG(778, 24)
|
||||
#define HV_COLC_UPPER PHY_REG(778, 25) /* Collision Count */
|
||||
#define HV_COLC_LOWER PHY_REG(778, 26)
|
||||
#define HV_DC_UPPER PHY_REG(778, 27) /* Defer Count */
|
||||
#define HV_DC_LOWER PHY_REG(778, 28)
|
||||
#define HV_TNCRS_UPPER PHY_REG(778, 29) /* Transmit with no CRS */
|
||||
#define HV_TNCRS_LOWER PHY_REG(778, 30)
|
||||
|
||||
enum e1000_boards {
|
||||
board_82571,
|
||||
board_82572,
|
||||
|
@ -106,6 +151,7 @@ enum e1000_boards {
|
|||
board_ich8lan,
|
||||
board_ich9lan,
|
||||
board_ich10lan,
|
||||
board_pchlan,
|
||||
};
|
||||
|
||||
struct e1000_queue_stats {
|
||||
|
@ -293,6 +339,7 @@ struct e1000_adapter {
|
|||
u32 eeprom_wol;
|
||||
u32 wol;
|
||||
u32 pba;
|
||||
u32 max_hw_frame_size;
|
||||
|
||||
bool fc_autoneg;
|
||||
|
||||
|
@ -302,6 +349,7 @@ struct e1000_adapter {
|
|||
unsigned int flags2;
|
||||
struct work_struct downshift_task;
|
||||
struct work_struct update_phy_task;
|
||||
struct work_struct led_blink_task;
|
||||
};
|
||||
|
||||
struct e1000_info {
|
||||
|
@ -309,6 +357,7 @@ struct e1000_info {
|
|||
unsigned int flags;
|
||||
unsigned int flags2;
|
||||
u32 pba;
|
||||
u32 max_hw_frame_size;
|
||||
s32 (*get_variants)(struct e1000_adapter *);
|
||||
struct e1000_mac_operations *mac_ops;
|
||||
struct e1000_phy_operations *phy_ops;
|
||||
|
@ -351,6 +400,7 @@ struct e1000_info {
|
|||
|
||||
/* CRC Stripping defines */
|
||||
#define FLAG2_CRC_STRIPPING (1 << 0)
|
||||
#define FLAG2_HAS_PHY_WAKEUP (1 << 1)
|
||||
|
||||
#define E1000_RX_DESC_PS(R, i) \
|
||||
(&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
|
||||
|
@ -404,6 +454,7 @@ extern struct e1000_info e1000_82583_info;
|
|||
extern struct e1000_info e1000_ich8_info;
|
||||
extern struct e1000_info e1000_ich9_info;
|
||||
extern struct e1000_info e1000_ich10_info;
|
||||
extern struct e1000_info e1000_pch_info;
|
||||
extern struct e1000_info e1000_es2_info;
|
||||
|
||||
extern s32 e1000e_read_pba_num(struct e1000_hw *hw, u32 *pba_num);
|
||||
|
@ -425,6 +476,7 @@ extern void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw);
|
|||
extern s32 e1000e_check_for_copper_link(struct e1000_hw *hw);
|
||||
extern s32 e1000e_check_for_fiber_link(struct e1000_hw *hw);
|
||||
extern s32 e1000e_check_for_serdes_link(struct e1000_hw *hw);
|
||||
extern s32 e1000e_setup_led_generic(struct e1000_hw *hw);
|
||||
extern s32 e1000e_cleanup_led_generic(struct e1000_hw *hw);
|
||||
extern s32 e1000e_led_on_generic(struct e1000_hw *hw);
|
||||
extern s32 e1000e_led_off_generic(struct e1000_hw *hw);
|
||||
|
@ -493,6 +545,15 @@ extern s32 e1000e_phy_reset_dsp(struct e1000_hw *hw);
|
|||
extern s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data);
|
||||
extern s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
|
||||
extern s32 e1000e_check_downshift(struct e1000_hw *hw);
|
||||
extern s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data);
|
||||
extern s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data);
|
||||
extern s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw, bool slow);
|
||||
extern s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw);
|
||||
extern s32 e1000_copper_link_setup_82577(struct e1000_hw *hw);
|
||||
extern s32 e1000_check_polarity_82577(struct e1000_hw *hw);
|
||||
extern s32 e1000_get_phy_info_82577(struct e1000_hw *hw);
|
||||
extern s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw);
|
||||
extern s32 e1000_get_cable_length_82577(struct e1000_hw *hw);
|
||||
|
||||
static inline s32 e1000_phy_hw_reset(struct e1000_hw *hw)
|
||||
{
|
||||
@ -1366,6 +1366,7 @@ static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw)
|
|||
}
|
||||
|
||||
static struct e1000_mac_operations es2_mac_ops = {
|
||||
.id_led_init = e1000e_id_led_init,
|
||||
.check_mng_mode = e1000e_check_mng_mode_generic,
|
||||
/* check_for_link dependent on media type */
|
||||
.cleanup_led = e1000e_cleanup_led_generic,
|
||||
|
@ -1379,6 +1380,7 @@ static struct e1000_mac_operations es2_mac_ops = {
|
|||
.init_hw = e1000_init_hw_80003es2lan,
|
||||
.setup_link = e1000e_setup_link,
|
||||
/* setup_physical_interface dependent on media type */
|
||||
.setup_led = e1000e_setup_led_generic,
|
||||
};
|
||||
|
||||
static struct e1000_phy_operations es2_phy_ops = {
|
||||
|
@ -1422,6 +1424,7 @@ struct e1000_info e1000_es2_info = {
|
|||
| FLAG_DISABLE_FC_PAUSE_TIME /* errata */
|
||||
| FLAG_TIPG_MEDIUM_FOR_80003ESLAN,
|
||||
.pba = 38,
|
||||
.max_hw_frame_size = DEFAULT_JUMBO,
|
||||
.get_variants = e1000_get_variants_80003es2lan,
|
||||
.mac_ops = &es2_mac_ops,
|
||||
.phy_ops = &es2_phy_ops,
|
||||
|
|
|
@ -167,6 +167,15 @@ static int e1000_get_settings(struct net_device *netdev,
|
|||
|
||||
ecmd->autoneg = ((hw->phy.media_type == e1000_media_type_fiber) ||
|
||||
hw->mac.autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE;
|
||||
|
||||
/* MDI-X => 2; MDI =>1; Invalid =>0 */
|
||||
if ((hw->phy.media_type == e1000_media_type_copper) &&
|
||||
!hw->mac.get_link_status)
|
||||
ecmd->eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X :
|
||||
ETH_TP_MDI;
|
||||
else
|
||||
ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -776,6 +785,7 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
|
|||
u32 after;
|
||||
u32 i;
|
||||
u32 toggle;
|
||||
u32 mask;
|
||||
|
||||
/*
|
||||
* The status register is Read Only, so a write should fail.
|
||||
|
@ -788,17 +798,9 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
|
|||
case e1000_80003es2lan:
|
||||
toggle = 0x7FFFF3FF;
|
||||
break;
|
||||
case e1000_82573:
|
||||
case e1000_82574:
|
||||
case e1000_82583:
|
||||
case e1000_ich8lan:
|
||||
case e1000_ich9lan:
|
||||
case e1000_ich10lan:
|
||||
default:
|
||||
toggle = 0x7FFFF033;
|
||||
break;
|
||||
default:
|
||||
toggle = 0xFFFFF833;
|
||||
break;
|
||||
}
|
||||
|
||||
before = er32(STATUS);
|
||||
|
@ -844,11 +846,18 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
|
|||
REG_PATTERN_TEST(E1000_TXCW, 0xC000FFFF, 0x0000FFFF);
|
||||
REG_PATTERN_TEST(E1000_TDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
|
||||
REG_PATTERN_TEST(E1000_TIDV, 0x0000FFFF, 0x0000FFFF);
|
||||
mask = 0x8003FFFF;
|
||||
switch (mac->type) {
|
||||
case e1000_ich10lan:
|
||||
case e1000_pchlan:
|
||||
mask |= (1 << 18);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
for (i = 0; i < mac->rar_entry_count; i++)
|
||||
REG_PATTERN_TEST_ARRAY(E1000_RA, ((i << 1) + 1),
|
||||
((mac->type == e1000_ich10lan) ?
|
||||
0x8007FFFF : 0x8003FFFF),
|
||||
0xFFFFFFFF);
|
||||
mask, 0xFFFFFFFF);
|
||||
|
||||
for (i = 0; i < mac->mta_reg_count; i++)
|
||||
REG_PATTERN_TEST_ARRAY(E1000_MTA, i, 0xFFFFFFFF, 0xFFFFFFFF);
|
||||
|
@ -1786,15 +1795,22 @@ static int e1000_set_wol(struct net_device *netdev,
|
|||
/* bit defines for adapter->led_status */
|
||||
#define E1000_LED_ON 0
|
||||
|
||||
static void e1000_led_blink_callback(unsigned long data)
|
||||
static void e1000e_led_blink_task(struct work_struct *work)
|
||||
{
|
||||
struct e1000_adapter *adapter = (struct e1000_adapter *) data;
|
||||
struct e1000_adapter *adapter = container_of(work,
|
||||
struct e1000_adapter, led_blink_task);
|
||||
|
||||
if (test_and_change_bit(E1000_LED_ON, &adapter->led_status))
|
||||
adapter->hw.mac.ops.led_off(&adapter->hw);
|
||||
else
|
||||
adapter->hw.mac.ops.led_on(&adapter->hw);
|
||||
}
|
||||
|
||||
static void e1000_led_blink_callback(unsigned long data)
|
||||
{
|
||||
struct e1000_adapter *adapter = (struct e1000_adapter *) data;
|
||||
|
||||
schedule_work(&adapter->led_blink_task);
|
||||
mod_timer(&adapter->blink_timer, jiffies + E1000_ID_INTERVAL);
|
||||
}
|
||||
|
||||
|
@ -1807,7 +1823,9 @@ static int e1000_phys_id(struct net_device *netdev, u32 data)
|
|||
data = INT_MAX;
|
||||
|
||||
if ((hw->phy.type == e1000_phy_ife) ||
|
||||
(hw->mac.type == e1000_pchlan) ||
|
||||
(hw->mac.type == e1000_82574)) {
|
||||
INIT_WORK(&adapter->led_blink_task, e1000e_led_blink_task);
|
||||
if (!adapter->blink_timer.function) {
|
||||
init_timer(&adapter->blink_timer);
|
||||
adapter->blink_timer.function =
|
||||
|
|
|
@ -193,7 +193,11 @@ enum e1e_registers {
|
|||
E1000_RXCSUM = 0x05000, /* Rx Checksum Control - RW */
|
||||
E1000_RFCTL = 0x05008, /* Receive Filter Control */
|
||||
E1000_MTA = 0x05200, /* Multicast Table Array - RW Array */
|
||||
E1000_RA = 0x05400, /* Receive Address - RW Array */
|
||||
E1000_RAL_BASE = 0x05400, /* Receive Address Low - RW */
|
||||
#define E1000_RAL(_n) (E1000_RAL_BASE + ((_n) * 8))
|
||||
#define E1000_RA (E1000_RAL(0))
|
||||
E1000_RAH_BASE = 0x05404, /* Receive Address High - RW */
|
||||
#define E1000_RAH(_n) (E1000_RAH_BASE + ((_n) * 8))
|
||||
E1000_VFTA = 0x05600, /* VLAN Filter Table Array - RW Array */
|
||||
E1000_WUC = 0x05800, /* Wakeup Control - RW */
|
||||
E1000_WUFC = 0x05808, /* Wakeup Filter Control - RW */
|
||||
|
@ -210,6 +214,7 @@ enum e1e_registers {
|
|||
E1000_FACTPS = 0x05B30, /* Function Active and Power State to MNG */
|
||||
E1000_SWSM = 0x05B50, /* SW Semaphore */
|
||||
E1000_FWSM = 0x05B54, /* FW Semaphore */
|
||||
E1000_SWSM2 = 0x05B58, /* Driver-only SW semaphore */
|
||||
E1000_HICR = 0x08F00, /* Host Interface Control */
|
||||
};
|
||||
|
||||
|
@ -368,6 +373,10 @@ enum e1e_registers {
|
|||
#define E1000_DEV_ID_ICH10_R_BM_V 0x10CE
|
||||
#define E1000_DEV_ID_ICH10_D_BM_LM 0x10DE
|
||||
#define E1000_DEV_ID_ICH10_D_BM_LF 0x10DF
|
||||
#define E1000_DEV_ID_PCH_M_HV_LM 0x10EA
|
||||
#define E1000_DEV_ID_PCH_M_HV_LC 0x10EB
|
||||
#define E1000_DEV_ID_PCH_D_HV_DM 0x10EF
|
||||
#define E1000_DEV_ID_PCH_D_HV_DC 0x10F0
|
||||
|
||||
#define E1000_REVISION_4 4
|
||||
|
||||
|
@ -383,6 +392,7 @@ enum e1000_mac_type {
|
|||
e1000_ich8lan,
|
||||
e1000_ich9lan,
|
||||
e1000_ich10lan,
|
||||
e1000_pchlan,
|
||||
};
|
||||
|
||||
enum e1000_media_type {
|
||||
|
@ -417,6 +427,8 @@ enum e1000_phy_type {
|
|||
e1000_phy_igp_3,
|
||||
e1000_phy_ife,
|
||||
e1000_phy_bm,
|
||||
e1000_phy_82578,
|
||||
e1000_phy_82577,
|
||||
};
|
||||
|
||||
enum e1000_bus_width {
|
||||
|
@ -720,6 +732,7 @@ struct e1000_host_mng_command_info {
|
|||
|
||||
/* Function pointers and static data for the MAC. */
|
||||
struct e1000_mac_operations {
|
||||
s32 (*id_led_init)(struct e1000_hw *);
|
||||
bool (*check_mng_mode)(struct e1000_hw *);
|
||||
s32 (*check_for_link)(struct e1000_hw *);
|
||||
s32 (*cleanup_led)(struct e1000_hw *);
|
||||
|
@ -733,11 +746,13 @@ struct e1000_mac_operations {
|
|||
s32 (*init_hw)(struct e1000_hw *);
|
||||
s32 (*setup_link)(struct e1000_hw *);
|
||||
s32 (*setup_physical_interface)(struct e1000_hw *);
|
||||
s32 (*setup_led)(struct e1000_hw *);
|
||||
};
|
||||
|
||||
/* Function pointers for the PHY. */
|
||||
struct e1000_phy_operations {
|
||||
s32 (*acquire_phy)(struct e1000_hw *);
|
||||
s32 (*check_polarity)(struct e1000_hw *);
|
||||
s32 (*check_reset_block)(struct e1000_hw *);
|
||||
s32 (*commit_phy)(struct e1000_hw *);
|
||||
s32 (*force_speed_duplex)(struct e1000_hw *);
|
||||
|
@ -869,6 +884,7 @@ struct e1000_fc_info {
|
|||
struct e1000_dev_spec_82571 {
|
||||
bool laa_is_present;
|
||||
bool alt_mac_addr_is_present;
|
||||
u32 smb_counter;
|
||||
};
|
||||
|
||||
struct e1000_shadow_ram {
|
||||
|
|
|
@ -48,6 +48,10 @@
|
|||
* 82567LF-3 Gigabit Network Connection
|
||||
* 82567LM-3 Gigabit Network Connection
|
||||
* 82567LM-4 Gigabit Network Connection
|
||||
* 82577LM Gigabit Network Connection
|
||||
* 82577LC Gigabit Network Connection
|
||||
* 82578DM Gigabit Network Connection
|
||||
* 82578DC Gigabit Network Connection
|
||||
*/
|
||||
|
||||
#include <linux/netdevice.h>
|
||||
|
@ -116,6 +120,8 @@
|
|||
#define IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK 0x0300
|
||||
#define IGP3_VR_CTRL_MODE_SHUTDOWN 0x0200
|
||||
|
||||
#define HV_LED_CONFIG PHY_REG(768, 30) /* LED Configuration */
|
||||
|
||||
/* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
|
||||
/* Offset 04h HSFSTS */
|
||||
union ich8_hws_flash_status {
|
||||
|
@ -186,6 +192,14 @@ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
|
|||
static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw);
|
||||
static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
|
||||
static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw);
|
||||
static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
|
||||
static s32 e1000_led_on_ich8lan(struct e1000_hw *hw);
|
||||
static s32 e1000_led_off_ich8lan(struct e1000_hw *hw);
|
||||
static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw);
|
||||
static s32 e1000_setup_led_pchlan(struct e1000_hw *hw);
|
||||
static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw);
|
||||
static s32 e1000_led_on_pchlan(struct e1000_hw *hw);
|
||||
static s32 e1000_led_off_pchlan(struct e1000_hw *hw);
|
||||
|
||||
static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg)
|
||||
{
|
||||
|
@ -212,6 +226,41 @@ static inline void __ew32flash(struct e1000_hw *hw, unsigned long reg, u32 val)
|
|||
#define ew16flash(reg,val) __ew16flash(hw, (reg), (val))
|
||||
#define ew32flash(reg,val) __ew32flash(hw, (reg), (val))
|
||||
|
||||
/**
|
||||
* e1000_init_phy_params_pchlan - Initialize PHY function pointers
|
||||
* @hw: pointer to the HW structure
|
||||
*
|
||||
* Initialize family-specific PHY parameters and function pointers.
|
||||
**/
|
||||
static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
|
||||
{
|
||||
struct e1000_phy_info *phy = &hw->phy;
|
||||
s32 ret_val = 0;
|
||||
|
||||
phy->addr = 1;
|
||||
phy->reset_delay_us = 100;
|
||||
|
||||
phy->ops.check_polarity = e1000_check_polarity_ife_ich8lan;
|
||||
phy->ops.read_phy_reg = e1000_read_phy_reg_hv;
|
||||
phy->ops.write_phy_reg = e1000_write_phy_reg_hv;
|
||||
phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
|
||||
|
||||
phy->id = e1000_phy_unknown;
|
||||
e1000e_get_phy_id(hw);
|
||||
phy->type = e1000e_get_phy_type_from_id(phy->id);
|
||||
|
||||
if (phy->type == e1000_phy_82577) {
|
||||
phy->ops.check_polarity = e1000_check_polarity_82577;
|
||||
phy->ops.force_speed_duplex =
|
||||
e1000_phy_force_speed_duplex_82577;
|
||||
phy->ops.get_cable_length = e1000_get_cable_length_82577;
|
||||
phy->ops.get_phy_info = e1000_get_phy_info_82577;
|
||||
phy->ops.commit_phy = e1000e_phy_sw_reset;
|
||||
}
|
||||
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_init_phy_params_ich8lan - Initialize PHY function pointers
|
||||
* @hw: pointer to the HW structure
|
||||
|
@ -273,6 +322,8 @@ static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
|
|||
break;
|
||||
}
|
||||
|
||||
phy->ops.check_polarity = e1000_check_polarity_ife_ich8lan;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -358,6 +409,36 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_adapter *adapter)
|
|||
/* Set if manageability features are enabled. */
|
||||
mac->arc_subsystem_valid = 1;
|
||||
|
||||
/* LED operations */
|
||||
switch (mac->type) {
|
||||
case e1000_ich8lan:
|
||||
case e1000_ich9lan:
|
||||
case e1000_ich10lan:
|
||||
/* ID LED init */
|
||||
mac->ops.id_led_init = e1000e_id_led_init;
|
||||
/* setup LED */
|
||||
mac->ops.setup_led = e1000e_setup_led_generic;
|
||||
/* cleanup LED */
|
||||
mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
|
||||
/* turn on/off LED */
|
||||
mac->ops.led_on = e1000_led_on_ich8lan;
|
||||
mac->ops.led_off = e1000_led_off_ich8lan;
|
||||
break;
|
||||
case e1000_pchlan:
|
||||
/* ID LED init */
|
||||
mac->ops.id_led_init = e1000_id_led_init_pchlan;
|
||||
/* setup LED */
|
||||
mac->ops.setup_led = e1000_setup_led_pchlan;
|
||||
/* cleanup LED */
|
||||
mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
|
||||
/* turn on/off LED */
|
||||
mac->ops.led_on = e1000_led_on_pchlan;
|
||||
mac->ops.led_off = e1000_led_off_pchlan;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
/* Enable PCS Lock-loss workaround for ICH8 */
|
||||
if (mac->type == e1000_ich8lan)
|
||||
e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, 1);
|
||||
|
@ -378,10 +459,18 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
|
|||
if (rc)
|
||||
return rc;
|
||||
|
||||
rc = e1000_init_phy_params_ich8lan(hw);
|
||||
if (hw->mac.type == e1000_pchlan)
|
||||
rc = e1000_init_phy_params_pchlan(hw);
|
||||
else
|
||||
rc = e1000_init_phy_params_ich8lan(hw);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
if (adapter->hw.phy.type == e1000_phy_ife) {
|
||||
adapter->flags &= ~FLAG_HAS_JUMBO_FRAMES;
|
||||
adapter->max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN;
|
||||
}
|
||||
|
||||
if ((adapter->hw.mac.type == e1000_ich8lan) &&
|
||||
(adapter->hw.phy.type == e1000_phy_igp_3))
|
||||
adapter->flags |= FLAG_LSC_GIG_SPEED_DROP;
|
||||
|
@ -410,12 +499,15 @@ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
|
|||
|
||||
while (timeout) {
|
||||
extcnf_ctrl = er32(EXTCNF_CTRL);
|
||||
extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
|
||||
ew32(EXTCNF_CTRL, extcnf_ctrl);
|
||||
|
||||
extcnf_ctrl = er32(EXTCNF_CTRL);
|
||||
if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
|
||||
break;
|
||||
if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)) {
|
||||
extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
|
||||
ew32(EXTCNF_CTRL, extcnf_ctrl);
|
||||
|
||||
extcnf_ctrl = er32(EXTCNF_CTRL);
|
||||
if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
|
||||
break;
|
||||
}
|
||||
mdelay(1);
|
||||
timeout--;
|
||||
}
|
||||
|
@ -554,6 +646,53 @@ static s32 e1000_phy_force_speed_duplex_ich8lan(struct e1000_hw *hw)
|
|||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_hv_phy_workarounds_ich8lan - A series of Phy workarounds to be
|
||||
* done after every PHY reset.
|
||||
**/
|
||||
static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
|
||||
{
|
||||
s32 ret_val = 0;
|
||||
|
||||
if (hw->mac.type != e1000_pchlan)
|
||||
return ret_val;
|
||||
|
||||
if (((hw->phy.type == e1000_phy_82577) &&
|
||||
((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
|
||||
((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
|
||||
/* Disable generation of early preamble */
|
||||
ret_val = e1e_wphy(hw, PHY_REG(769, 25), 0x4431);
|
||||
if (ret_val)
|
||||
return ret_val;
|
||||
|
||||
/* Preamble tuning for SSC */
|
||||
ret_val = e1e_wphy(hw, PHY_REG(770, 16), 0xA204);
|
||||
if (ret_val)
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
if (hw->phy.type == e1000_phy_82578) {
|
||||
/*
|
||||
* Return registers to default by doing a soft reset then
|
||||
* writing 0x3140 to the control register.
|
||||
*/
|
||||
if (hw->phy.revision < 2) {
|
||||
e1000e_phy_sw_reset(hw);
|
||||
ret_val = e1e_wphy(hw, PHY_CONTROL, 0x3140);
|
||||
}
|
||||
}
|
||||
|
||||
/* Select page 0 */
|
||||
ret_val = hw->phy.ops.acquire_phy(hw);
|
||||
if (ret_val)
|
||||
return ret_val;
|
||||
hw->phy.addr = 1;
|
||||
e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
|
||||
hw->phy.ops.release_phy(hw);
|
||||
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_phy_hw_reset_ich8lan - Performs a PHY reset
|
||||
* @hw: pointer to the HW structure
|
||||
|
@ -575,6 +714,12 @@ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
|
|||
if (ret_val)
|
||||
return ret_val;
|
||||
|
||||
if (hw->mac.type == e1000_pchlan) {
|
||||
ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
|
||||
if (ret_val)
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
/*
|
||||
* Initialize the PHY from the NVM on ICH platforms. This
|
||||
* is needed due to an issue where the NVM configuration is
|
||||
|
@ -701,7 +846,7 @@ static s32 e1000_get_phy_info_ife_ich8lan(struct e1000_hw *hw)
|
|||
phy->polarity_correction = (!(data & IFE_PSC_AUTO_POLARITY_DISABLE));
|
||||
|
||||
if (phy->polarity_correction) {
|
||||
ret_val = e1000_check_polarity_ife_ich8lan(hw);
|
||||
ret_val = phy->ops.check_polarity(hw);
|
||||
if (ret_val)
|
||||
return ret_val;
|
||||
} else {
|
||||
|
@ -741,6 +886,8 @@ static s32 e1000_get_phy_info_ich8lan(struct e1000_hw *hw)
|
|||
break;
|
||||
case e1000_phy_igp_3:
|
||||
case e1000_phy_bm:
|
||||
case e1000_phy_82578:
|
||||
case e1000_phy_82577:
|
||||
return e1000e_get_phy_info_igp(hw);
|
||||
break;
|
||||
default:
|
||||
|
@ -1851,6 +1998,79 @@ static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
|
|||
return 0;
|
||||
}
|
||||
|
||||
/**
 * e1000_id_led_init_pchlan - store LED configurations
 * @hw: pointer to the HW structure
 *
 * PCH does not control LEDs via the LEDCTL register, rather it uses
 * the PHY LED configuration register.
 *
 * PCH also does not have an "always on" or "always off" mode which
 * complicates the ID feature. Instead of using the "on" mode to indicate
 * in ledctl_mode2 the LEDs to use for ID (see e1000e_id_led_init()),
 * use "link_up" mode. The LEDs will still ID on request if there is no
 * link based on logic in e1000_led_[on|off]_pchlan().
 **/
|
||||
static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw)
|
||||
{
|
||||
struct e1000_mac_info *mac = &hw->mac;
|
||||
s32 ret_val;
|
||||
const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP;
|
||||
const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT;
|
||||
u16 data, i, temp, shift;
|
||||
|
||||
/* Get default ID LED modes */
|
||||
ret_val = hw->nvm.ops.valid_led_default(hw, &data);
|
||||
if (ret_val)
|
||||
goto out;
|
||||
|
||||
mac->ledctl_default = er32(LEDCTL);
|
||||
mac->ledctl_mode1 = mac->ledctl_default;
|
||||
mac->ledctl_mode2 = mac->ledctl_default;
|
||||
|
||||
for (i = 0; i < 4; i++) {
|
||||
temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
|
||||
shift = (i * 5);
|
||||
switch (temp) {
|
||||
case ID_LED_ON1_DEF2:
|
||||
case ID_LED_ON1_ON2:
|
||||
case ID_LED_ON1_OFF2:
|
||||
mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
|
||||
mac->ledctl_mode1 |= (ledctl_on << shift);
|
||||
break;
|
||||
case ID_LED_OFF1_DEF2:
|
||||
case ID_LED_OFF1_ON2:
|
||||
case ID_LED_OFF1_OFF2:
|
||||
mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
|
||||
mac->ledctl_mode1 |= (ledctl_off << shift);
|
||||
break;
|
||||
default:
|
||||
/* Do nothing */
|
||||
break;
|
||||
}
|
||||
switch (temp) {
|
||||
case ID_LED_DEF1_ON2:
|
||||
case ID_LED_ON1_ON2:
|
||||
case ID_LED_OFF1_ON2:
|
||||
mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
|
||||
mac->ledctl_mode2 |= (ledctl_on << shift);
|
||||
break;
|
||||
case ID_LED_DEF1_OFF2:
|
||||
case ID_LED_ON1_OFF2:
|
||||
case ID_LED_OFF1_OFF2:
|
||||
mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
|
||||
mac->ledctl_mode2 |= (ledctl_off << shift);
|
||||
break;
|
||||
default:
|
||||
/* Do nothing */
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
out:
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_get_bus_info_ich8lan - Get/Set the bus type and width
|
||||
* @hw: pointer to the HW structure
|
||||
|
@ -1960,6 +2180,9 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
|
|||
kab |= E1000_KABGTXD_BGSQLBIAS;
|
||||
ew32(KABGTXD, kab);
|
||||
|
||||
if (hw->mac.type == e1000_pchlan)
|
||||
ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
|
||||
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
|
@ -1985,7 +2208,7 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
|
|||
e1000_initialize_hw_bits_ich8lan(hw);
|
||||
|
||||
/* Initialize identification LED */
|
||||
ret_val = e1000e_id_led_init(hw);
|
||||
ret_val = mac->ops.id_led_init(hw);
|
||||
if (ret_val) {
|
||||
hw_dbg(hw, "Error initializing identification LED\n");
|
||||
return ret_val;
|
||||
|
@ -2030,6 +2253,16 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
|
|||
ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
|
||||
ew32(CTRL_EXT, ctrl_ext);
|
||||
|
||||
/*
 * The 82578 Rx buffer will stall if wakeup is enabled in host and
 * the ME. Reading the BM_WUC register will clear the host wakeup bit.
 * Reset the phy after disabling host wakeup to reset the Rx buffer.
 */
|
||||
if (hw->phy.type == e1000_phy_82578) {
|
||||
e1e_rphy(hw, BM_WUC, &i);
|
||||
e1000e_phy_hw_reset_generic(hw);
|
||||
}
|
||||
|
||||
/*
|
||||
* Clear all of the statistics registers (clear on read). It is
|
||||
* important that we do this after we have tried to establish link
|
||||
|
@ -2054,6 +2287,9 @@ static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
|
|||
/* Extended Device Control */
|
||||
reg = er32(CTRL_EXT);
|
||||
reg |= (1 << 22);
|
||||
/* Enable PHY low-power state when MAC is at D3 w/o WoL */
|
||||
if (hw->mac.type >= e1000_pchlan)
|
||||
reg |= E1000_CTRL_EXT_PHYPDEN;
|
||||
ew32(CTRL_EXT, reg);
|
||||
|
||||
/* Transmit Descriptor Control 0 */
|
||||
|
@ -2112,8 +2348,13 @@ static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
|
|||
* the default flow control setting, so we explicitly
|
||||
* set it to full.
|
||||
*/
|
||||
if (hw->fc.requested_mode == e1000_fc_default)
|
||||
hw->fc.requested_mode = e1000_fc_full;
|
||||
if (hw->fc.requested_mode == e1000_fc_default) {
|
||||
/* Workaround h/w hang when Tx flow control enabled */
|
||||
if (hw->mac.type == e1000_pchlan)
|
||||
hw->fc.requested_mode = e1000_fc_rx_pause;
|
||||
else
|
||||
hw->fc.requested_mode = e1000_fc_full;
|
||||
}
|
||||
|
||||
/*
|
||||
* Save off the requested flow control mode for use later. Depending
|
||||
|
@ -2130,6 +2371,14 @@ static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
|
|||
return ret_val;
|
||||
|
||||
ew32(FCTTV, hw->fc.pause_time);
|
||||
if ((hw->phy.type == e1000_phy_82578) ||
|
||||
(hw->phy.type == e1000_phy_82577)) {
|
||||
ret_val = hw->phy.ops.write_phy_reg(hw,
|
||||
PHY_REG(BM_PORT_CTRL_PAGE, 27),
|
||||
hw->fc.pause_time);
|
||||
if (ret_val)
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
return e1000e_set_fc_watermarks(hw);
|
||||
}
|
||||
|
@ -2169,18 +2418,26 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
|
|||
if (ret_val)
|
||||
return ret_val;
|
||||
|
||||
if (hw->phy.type == e1000_phy_igp_3) {
|
||||
switch (hw->phy.type) {
|
||||
case e1000_phy_igp_3:
|
||||
ret_val = e1000e_copper_link_setup_igp(hw);
|
||||
if (ret_val)
|
||||
return ret_val;
|
||||
} else if (hw->phy.type == e1000_phy_bm) {
|
||||
break;
|
||||
case e1000_phy_bm:
|
||||
case e1000_phy_82578:
|
||||
ret_val = e1000e_copper_link_setup_m88(hw);
|
||||
if (ret_val)
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
if (hw->phy.type == e1000_phy_ife) {
|
||||
ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &reg_data);
|
||||
break;
|
||||
case e1000_phy_82577:
|
||||
ret_val = e1000_copper_link_setup_82577(hw);
|
||||
if (ret_val)
|
||||
return ret_val;
|
||||
break;
|
||||
case e1000_phy_ife:
|
||||
ret_val = hw->phy.ops.read_phy_reg(hw, IFE_PHY_MDIX_CONTROL,
|
||||
&reg_data);
|
||||
if (ret_val)
|
||||
return ret_val;
|
||||
|
||||
|
@ -2198,9 +2455,13 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
|
|||
reg_data |= IFE_PMC_AUTO_MDIX;
|
||||
break;
|
||||
}
|
||||
ret_val = e1e_wphy(hw, IFE_PHY_MDIX_CONTROL, reg_data);
|
||||
ret_val = hw->phy.ops.write_phy_reg(hw, IFE_PHY_MDIX_CONTROL,
|
||||
reg_data);
|
||||
if (ret_val)
|
||||
return ret_val;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
return e1000e_setup_copper_link(hw);
|
||||
}
|
||||
|
@ -2417,18 +2678,26 @@ void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
|
|||
* 'LPLU Enabled' and 'Gig Disable' to force link speed negotiation
|
||||
* to a lower speed.
|
||||
*
|
||||
* Should only be called for ICH9 and ICH10 devices.
|
||||
* Should only be called for applicable parts.
|
||||
**/
|
||||
void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw)
|
||||
{
|
||||
u32 phy_ctrl;
|
||||
|
||||
if ((hw->mac.type == e1000_ich10lan) ||
|
||||
(hw->mac.type == e1000_ich9lan)) {
|
||||
switch (hw->mac.type) {
|
||||
case e1000_ich9lan:
|
||||
case e1000_ich10lan:
|
||||
case e1000_pchlan:
|
||||
phy_ctrl = er32(PHY_CTRL);
|
||||
phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU |
|
||||
E1000_PHY_CTRL_GBE_DISABLE;
|
||||
ew32(PHY_CTRL, phy_ctrl);
|
||||
|
||||
/* Workaround SWFLAG unexpectedly set during S0->Sx */
|
||||
if (hw->mac.type == e1000_pchlan)
|
||||
udelay(500);
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
return;
|
||||
|
@ -2481,6 +2750,92 @@ static s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
|
|||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_setup_led_pchlan - Configures SW controllable LED
|
||||
* @hw: pointer to the HW structure
|
||||
*
|
||||
* This prepares the SW controllable LED for use.
|
||||
**/
|
||||
static s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
|
||||
{
|
||||
return hw->phy.ops.write_phy_reg(hw, HV_LED_CONFIG,
|
||||
(u16)hw->mac.ledctl_mode1);
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_cleanup_led_pchlan - Restore the default LED operation
|
||||
* @hw: pointer to the HW structure
|
||||
*
|
||||
* Return the LED back to the default configuration.
|
||||
**/
|
||||
static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
|
||||
{
|
||||
return hw->phy.ops.write_phy_reg(hw, HV_LED_CONFIG,
|
||||
(u16)hw->mac.ledctl_default);
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_led_on_pchlan - Turn LEDs on
|
||||
* @hw: pointer to the HW structure
|
||||
*
|
||||
* Turn on the LEDs.
|
||||
**/
|
||||
static s32 e1000_led_on_pchlan(struct e1000_hw *hw)
|
||||
{
|
||||
u16 data = (u16)hw->mac.ledctl_mode2;
|
||||
u32 i, led;
|
||||
|
||||
/*
 * If no link, then turn LED on by setting the invert bit
 * for each LED whose mode is "link_up" in ledctl_mode2.
 */
|
||||
if (!(er32(STATUS) & E1000_STATUS_LU)) {
|
||||
for (i = 0; i < 3; i++) {
|
||||
led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
|
||||
if ((led & E1000_PHY_LED0_MODE_MASK) !=
|
||||
E1000_LEDCTL_MODE_LINK_UP)
|
||||
continue;
|
||||
if (led & E1000_PHY_LED0_IVRT)
|
||||
data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
|
||||
else
|
||||
data |= (E1000_PHY_LED0_IVRT << (i * 5));
|
||||
}
|
||||
}
|
||||
|
||||
return hw->phy.ops.write_phy_reg(hw, HV_LED_CONFIG, data);
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_led_off_pchlan - Turn LEDs off
|
||||
* @hw: pointer to the HW structure
|
||||
*
|
||||
* Turn off the LEDs.
|
||||
**/
|
||||
static s32 e1000_led_off_pchlan(struct e1000_hw *hw)
|
||||
{
|
||||
u16 data = (u16)hw->mac.ledctl_mode1;
|
||||
u32 i, led;
|
||||
|
||||
/*
 * If no link, then turn LED off by clearing the invert bit
 * for each LED whose mode is "link_up" in ledctl_mode1.
 */
|
||||
if (!(er32(STATUS) & E1000_STATUS_LU)) {
|
||||
for (i = 0; i < 3; i++) {
|
||||
led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
|
||||
if ((led & E1000_PHY_LED0_MODE_MASK) !=
|
||||
E1000_LEDCTL_MODE_LINK_UP)
|
||||
continue;
|
||||
if (led & E1000_PHY_LED0_IVRT)
|
||||
data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
|
||||
else
|
||||
data |= (E1000_PHY_LED0_IVRT << (i * 5));
|
||||
}
|
||||
}
|
||||
|
||||
return hw->phy.ops.write_phy_reg(hw, HV_LED_CONFIG, data);
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_get_cfg_done_ich8lan - Read config done bit
|
||||
* @hw: pointer to the HW structure
|
||||
|
@ -2488,7 +2843,7 @@ static s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
|
|||
* Read the management control register for the config done bit for
|
||||
* completion status. NOTE: silicon which is EEPROM-less will fail trying
|
||||
* to read the config done bit, so an error is *ONLY* logged and returns
|
||||
* E1000_SUCCESS. If we were to return with error, EEPROM-less silicon
|
||||
* 0. If we were to return with error, EEPROM-less silicon
|
||||
* would not be able to be reset or change link.
|
||||
**/
|
||||
static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
|
||||
|
@ -2498,7 +2853,8 @@ static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
|
|||
e1000e_get_cfg_done(hw);
|
||||
|
||||
/* If EEPROM is not marked present, init the IGP 3 PHY manually */
|
||||
if (hw->mac.type != e1000_ich10lan) {
|
||||
if ((hw->mac.type != e1000_ich10lan) &&
|
||||
(hw->mac.type != e1000_pchlan)) {
|
||||
if (((er32(EECD) & E1000_EECD_PRES) == 0) &&
|
||||
(hw->phy.type == e1000_phy_igp_3)) {
|
||||
e1000e_phy_init_script_igp3(hw);
|
||||
|
@ -2524,6 +2880,7 @@ static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
|
|||
static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
|
||||
{
|
||||
u32 temp;
|
||||
u16 phy_data;
|
||||
|
||||
e1000e_clear_hw_cntrs_base(hw);
|
||||
|
||||
|
@ -2541,22 +2898,42 @@ static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
|
|||
temp = er32(IAC);
|
||||
temp = er32(ICRXOC);
|
||||
|
||||
/* Clear PHY statistics registers */
|
||||
if ((hw->phy.type == e1000_phy_82578) ||
|
||||
(hw->phy.type == e1000_phy_82577)) {
|
||||
hw->phy.ops.read_phy_reg(hw, HV_SCC_UPPER, &phy_data);
|
||||
hw->phy.ops.read_phy_reg(hw, HV_SCC_LOWER, &phy_data);
|
||||
hw->phy.ops.read_phy_reg(hw, HV_ECOL_UPPER, &phy_data);
|
||||
hw->phy.ops.read_phy_reg(hw, HV_ECOL_LOWER, &phy_data);
|
||||
hw->phy.ops.read_phy_reg(hw, HV_MCC_UPPER, &phy_data);
|
||||
hw->phy.ops.read_phy_reg(hw, HV_MCC_LOWER, &phy_data);
|
||||
hw->phy.ops.read_phy_reg(hw, HV_LATECOL_UPPER, &phy_data);
|
||||
hw->phy.ops.read_phy_reg(hw, HV_LATECOL_LOWER, &phy_data);
|
||||
hw->phy.ops.read_phy_reg(hw, HV_COLC_UPPER, &phy_data);
|
||||
hw->phy.ops.read_phy_reg(hw, HV_COLC_LOWER, &phy_data);
|
||||
hw->phy.ops.read_phy_reg(hw, HV_DC_UPPER, &phy_data);
|
||||
hw->phy.ops.read_phy_reg(hw, HV_DC_LOWER, &phy_data);
|
||||
hw->phy.ops.read_phy_reg(hw, HV_TNCRS_UPPER, &phy_data);
|
||||
hw->phy.ops.read_phy_reg(hw, HV_TNCRS_LOWER, &phy_data);
|
||||
}
|
||||
}
|
||||
|
||||
static struct e1000_mac_operations ich8_mac_ops = {
|
||||
.id_led_init = e1000e_id_led_init,
|
||||
.check_mng_mode = e1000_check_mng_mode_ich8lan,
|
||||
.check_for_link = e1000e_check_for_copper_link,
|
||||
.cleanup_led = e1000_cleanup_led_ich8lan,
|
||||
/* cleanup_led dependent on mac type */
|
||||
.clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan,
|
||||
.get_bus_info = e1000_get_bus_info_ich8lan,
|
||||
.get_link_up_info = e1000_get_link_up_info_ich8lan,
|
||||
.led_on = e1000_led_on_ich8lan,
|
||||
.led_off = e1000_led_off_ich8lan,
|
||||
/* led_on dependent on mac type */
|
||||
/* led_off dependent on mac type */
|
||||
.update_mc_addr_list = e1000e_update_mc_addr_list_generic,
|
||||
.reset_hw = e1000_reset_hw_ich8lan,
|
||||
.init_hw = e1000_init_hw_ich8lan,
|
||||
.setup_link = e1000_setup_link_ich8lan,
|
||||
.setup_physical_interface = e1000_setup_copper_link_ich8lan,
|
||||
/* id_led_init dependent on mac type */
|
||||
};
|
||||
|
||||
static struct e1000_phy_operations ich8_phy_ops = {
|
||||
|
@ -2595,6 +2972,7 @@ struct e1000_info e1000_ich8_info = {
|
|||
| FLAG_HAS_FLASH
|
||||
| FLAG_APME_IN_WUC,
|
||||
.pba = 8,
|
||||
.max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN,
|
||||
.get_variants = e1000_get_variants_ich8lan,
|
||||
.mac_ops = &ich8_mac_ops,
|
||||
.phy_ops = &ich8_phy_ops,
|
||||
|
@ -2613,6 +2991,7 @@ struct e1000_info e1000_ich9_info = {
|
|||
| FLAG_HAS_FLASH
|
||||
| FLAG_APME_IN_WUC,
|
||||
.pba = 10,
|
||||
.max_hw_frame_size = DEFAULT_JUMBO,
|
||||
.get_variants = e1000_get_variants_ich8lan,
|
||||
.mac_ops = &ich8_mac_ops,
|
||||
.phy_ops = &ich8_phy_ops,
|
||||
|
@ -2631,6 +3010,25 @@ struct e1000_info e1000_ich10_info = {
|
|||
| FLAG_HAS_FLASH
|
||||
| FLAG_APME_IN_WUC,
|
||||
.pba = 10,
|
||||
.max_hw_frame_size = DEFAULT_JUMBO,
|
||||
.get_variants = e1000_get_variants_ich8lan,
|
||||
.mac_ops = &ich8_mac_ops,
|
||||
.phy_ops = &ich8_phy_ops,
|
||||
.nvm_ops = &ich8_nvm_ops,
|
||||
};
|
||||
|
||||
struct e1000_info e1000_pch_info = {
|
||||
.mac = e1000_pchlan,
|
||||
.flags = FLAG_IS_ICH
|
||||
| FLAG_HAS_WOL
|
||||
| FLAG_RX_CSUM_ENABLED
|
||||
| FLAG_HAS_CTRLEXT_ON_LOAD
|
||||
| FLAG_HAS_AMT
|
||||
| FLAG_HAS_FLASH
|
||||
| FLAG_HAS_JUMBO_FRAMES
|
||||
| FLAG_APME_IN_WUC,
|
||||
.pba = 26,
|
||||
.max_hw_frame_size = 4096,
|
||||
.get_variants = e1000_get_variants_ich8lan,
|
||||
.mac_ops = &ich8_mac_ops,
|
||||
.phy_ops = &ich8_phy_ops,
|
||||
|
|
|
@ -378,6 +378,12 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
|
|||
|
||||
mac->get_link_status = 0;
|
||||
|
||||
if (hw->phy.type == e1000_phy_82578) {
|
||||
ret_val = e1000_link_stall_workaround_hv(hw);
|
||||
if (ret_val)
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
/*
|
||||
* Check if there was DownShift, must be checked
|
||||
* immediately after link-up
|
||||
|
@ -1405,6 +1411,38 @@ s32 e1000e_id_led_init(struct e1000_hw *hw)
|
|||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000e_setup_led_generic - Configures SW controllable LED
|
||||
* @hw: pointer to the HW structure
|
||||
*
|
||||
* This prepares the SW controllable LED for use and saves the current state
|
||||
* of the LED so it can be later restored.
|
||||
**/
|
||||
s32 e1000e_setup_led_generic(struct e1000_hw *hw)
|
||||
{
|
||||
u32 ledctl;
|
||||
|
||||
if (hw->mac.ops.setup_led != e1000e_setup_led_generic) {
|
||||
return -E1000_ERR_CONFIG;
|
||||
}
|
||||
|
||||
if (hw->phy.media_type == e1000_media_type_fiber) {
|
||||
ledctl = er32(LEDCTL);
|
||||
hw->mac.ledctl_default = ledctl;
|
||||
/* Turn off LED0 */
|
||||
ledctl &= ~(E1000_LEDCTL_LED0_IVRT |
|
||||
E1000_LEDCTL_LED0_BLINK |
|
||||
E1000_LEDCTL_LED0_MODE_MASK);
|
||||
ledctl |= (E1000_LEDCTL_MODE_LED_OFF <<
|
||||
E1000_LEDCTL_LED0_MODE_SHIFT);
|
||||
ew32(LEDCTL, ledctl);
|
||||
} else if (hw->phy.media_type == e1000_media_type_copper) {
|
||||
ew32(LEDCTL, hw->mac.ledctl_mode1);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000e_cleanup_led_generic - Set LED config to default operation
|
||||
* @hw: pointer to the HW structure
|
||||
|
|
|
@ -48,7 +48,7 @@
|
|||
|
||||
#include "e1000.h"
|
||||
|
||||
#define DRV_VERSION "0.3.3.4-k4"
|
||||
#define DRV_VERSION "1.0.2-k2"
|
||||
char e1000e_driver_name[] = "e1000e";
|
||||
const char e1000e_driver_version[] = DRV_VERSION;
|
||||
|
||||
|
@ -62,6 +62,7 @@ static const struct e1000_info *e1000_info_tbl[] = {
|
|||
[board_ich8lan] = &e1000_ich8_info,
|
||||
[board_ich9lan] = &e1000_ich9_info,
|
||||
[board_ich10lan] = &e1000_ich10_info,
|
||||
[board_pchlan] = &e1000_pch_info,
|
||||
};
|
||||
|
||||
#ifdef DEBUG
|
||||
|
@ -2255,8 +2256,6 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
|
|||
ew32(TARC(1), tarc);
|
||||
}
|
||||
|
||||
e1000e_config_collision_dist(hw);
|
||||
|
||||
/* Setup Transmit Descriptor Settings for eop descriptor */
|
||||
adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
|
||||
|
||||
|
@ -2269,6 +2268,8 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
|
|||
|
||||
ew32(TCTL, tctl);
|
||||
|
||||
e1000e_config_collision_dist(hw);
|
||||
|
||||
adapter->tx_queue_len = adapter->netdev->tx_queue_len;
|
||||
}
|
||||
|
||||
|
@ -2308,6 +2309,23 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
|
|||
if (adapter->flags2 & FLAG2_CRC_STRIPPING)
|
||||
rctl |= E1000_RCTL_SECRC;
|
||||
|
||||
/* Workaround Si errata on 82577 PHY - configure IPG for jumbos */
|
||||
if ((hw->phy.type == e1000_phy_82577) && (rctl & E1000_RCTL_LPE)) {
|
||||
u16 phy_data;
|
||||
|
||||
e1e_rphy(hw, PHY_REG(770, 26), &phy_data);
|
||||
phy_data &= 0xfff8;
|
||||
phy_data |= (1 << 2);
|
||||
e1e_wphy(hw, PHY_REG(770, 26), phy_data);
|
||||
|
||||
e1e_rphy(hw, 22, &phy_data);
|
||||
phy_data &= 0x0fff;
|
||||
phy_data |= (1 << 14);
|
||||
e1e_wphy(hw, 0x10, 0x2823);
|
||||
e1e_wphy(hw, 0x11, 0x0003);
|
||||
e1e_wphy(hw, 22, phy_data);
|
||||
}
|
||||
|
||||
/* Setup buffer sizes */
|
||||
rctl &= ~E1000_RCTL_SZ_4096;
|
||||
rctl |= E1000_RCTL_BSEX;
|
||||
|
@ -2751,23 +2769,25 @@ void e1000e_reset(struct e1000_adapter *adapter)
|
|||
/*
 * flow control settings
 *
 * The high water mark must be low enough to fit one full frame
 * The high water mark must be low enough to fit two full frames
 * (or the size used for early receive) above it in the Rx FIFO.
 * Set it to the lower of:
 * - 90% of the Rx FIFO size, and
 * - the full Rx FIFO size minus the early receive size (for parts
 *   with ERT support assuming ERT set to E1000_ERT_2048), or
 * - the full Rx FIFO size minus one full frame
 * - the full Rx FIFO size minus two full frames
 */
|
||||
if (adapter->flags & FLAG_HAS_ERT)
|
||||
if ((adapter->flags & FLAG_HAS_ERT) &&
|
||||
(adapter->netdev->mtu > ETH_DATA_LEN))
|
||||
hwm = min(((pba << 10) * 9 / 10),
|
||||
((pba << 10) - (E1000_ERT_2048 << 3)));
|
||||
else
|
||||
hwm = min(((pba << 10) * 9 / 10),
|
||||
((pba << 10) - adapter->max_frame_size));
|
||||
((pba << 10) - (2 * adapter->max_frame_size)));
|
||||
|
||||
fc->high_water = hwm & 0xFFF8; /* 8-byte granularity */
|
||||
fc->low_water = fc->high_water - 8;
|
||||
fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */
|
||||
fc->low_water = (fc->high_water - (2 * adapter->max_frame_size));
|
||||
fc->low_water &= E1000_FCRTL_RTL; /* 8-byte granularity */
|
||||
|
||||
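A rough worked example of the watermark arithmetic above (an editor's sketch, not part of the diff), assuming pba = 26 as used for the PCH parts, a 1522-byte max_frame_size, and no early-receive (ERT) path:

#include <stdio.h>

int main(void)
{
	unsigned int pba = 26;			/* assumed: Rx packet buffer in KB */
	unsigned int max_frame = 1522;		/* assumed: 1500-byte MTU + headers */
	unsigned int fifo = pba << 10;		/* 26624 bytes */
	unsigned int hwm, high, low;

	/* lower of 90% of the FIFO and the FIFO minus two full frames */
	hwm = fifo * 9 / 10;
	if (fifo - 2 * max_frame < hwm)
		hwm = fifo - 2 * max_frame;

	high = hwm & 0xFFF8;			/* E1000_FCRTH_RTH, 8-byte granularity */
	low = (high - 2 * max_frame) & 0xFFF8;	/* E1000_FCRTL_RTL */

	printf("high water = %u, low water = %u\n", high, low);	/* 23576, 20528 */
	return 0;
}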
if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME)
|
||||
fc->pause_time = 0xFFFF;
|
||||
|
@ -2787,6 +2807,8 @@ void e1000e_reset(struct e1000_adapter *adapter)
|
|||
e1000_get_hw_control(adapter);
|
||||
|
||||
ew32(WUC, 0);
|
||||
if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP)
|
||||
e1e_wphy(&adapter->hw, BM_WUC, 0);
|
||||
|
||||
if (mac->ops.init_hw(hw))
|
||||
e_err("Hardware Error\n");
|
||||
|
@ -2799,7 +2821,8 @@ void e1000e_reset(struct e1000_adapter *adapter)
|
|||
e1000e_reset_adaptive(hw);
|
||||
e1000_get_phy_info(hw);
|
||||
|
||||
if (!(adapter->flags & FLAG_SMART_POWER_DOWN)) {
|
||||
if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) &&
|
||||
!(adapter->flags & FLAG_SMART_POWER_DOWN)) {
|
||||
u16 phy_data = 0;
|
||||
/*
|
||||
* speed up time to link by disabling smart power down, ignore
|
||||
|
@ -3266,6 +3289,7 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
|
|||
{
|
||||
struct e1000_hw *hw = &adapter->hw;
|
||||
struct pci_dev *pdev = adapter->pdev;
|
||||
u16 phy_data;
|
||||
|
||||
/*
|
||||
* Prevent stats update while adapter is being reset, or if the pci
|
||||
|
@ -3285,11 +3309,34 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
|
|||
adapter->stats.roc += er32(ROC);
|
||||
|
||||
adapter->stats.mpc += er32(MPC);
|
||||
adapter->stats.scc += er32(SCC);
|
||||
adapter->stats.ecol += er32(ECOL);
|
||||
adapter->stats.mcc += er32(MCC);
|
||||
adapter->stats.latecol += er32(LATECOL);
|
||||
adapter->stats.dc += er32(DC);
|
||||
if ((hw->phy.type == e1000_phy_82578) ||
|
||||
(hw->phy.type == e1000_phy_82577)) {
|
||||
e1e_rphy(hw, HV_SCC_UPPER, &phy_data);
|
||||
e1e_rphy(hw, HV_SCC_LOWER, &phy_data);
|
||||
adapter->stats.scc += phy_data;
|
||||
|
||||
e1e_rphy(hw, HV_ECOL_UPPER, &phy_data);
|
||||
e1e_rphy(hw, HV_ECOL_LOWER, &phy_data);
|
||||
adapter->stats.ecol += phy_data;
|
||||
|
||||
e1e_rphy(hw, HV_MCC_UPPER, &phy_data);
|
||||
e1e_rphy(hw, HV_MCC_LOWER, &phy_data);
|
||||
adapter->stats.mcc += phy_data;
|
||||
|
||||
e1e_rphy(hw, HV_LATECOL_UPPER, &phy_data);
|
||||
e1e_rphy(hw, HV_LATECOL_LOWER, &phy_data);
|
||||
adapter->stats.latecol += phy_data;
|
||||
|
||||
e1e_rphy(hw, HV_DC_UPPER, &phy_data);
|
||||
e1e_rphy(hw, HV_DC_LOWER, &phy_data);
|
||||
adapter->stats.dc += phy_data;
|
||||
} else {
|
||||
adapter->stats.scc += er32(SCC);
|
||||
adapter->stats.ecol += er32(ECOL);
|
||||
adapter->stats.mcc += er32(MCC);
|
||||
adapter->stats.latecol += er32(LATECOL);
|
||||
adapter->stats.dc += er32(DC);
|
||||
}
|
||||
adapter->stats.xonrxc += er32(XONRXC);
|
||||
adapter->stats.xontxc += er32(XONTXC);
|
||||
adapter->stats.xoffrxc += er32(XOFFRXC);
|
||||
|
@ -3307,13 +3354,28 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
|
|||
|
||||
hw->mac.tx_packet_delta = er32(TPT);
|
||||
adapter->stats.tpt += hw->mac.tx_packet_delta;
|
||||
hw->mac.collision_delta = er32(COLC);
|
||||
if ((hw->phy.type == e1000_phy_82578) ||
|
||||
(hw->phy.type == e1000_phy_82577)) {
|
||||
e1e_rphy(hw, HV_COLC_UPPER, &phy_data);
|
||||
e1e_rphy(hw, HV_COLC_LOWER, &phy_data);
|
||||
hw->mac.collision_delta = phy_data;
|
||||
} else {
|
||||
hw->mac.collision_delta = er32(COLC);
|
||||
}
|
||||
adapter->stats.colc += hw->mac.collision_delta;
|
||||
|
||||
adapter->stats.algnerrc += er32(ALGNERRC);
|
||||
adapter->stats.rxerrc += er32(RXERRC);
|
||||
if ((hw->mac.type != e1000_82574) && (hw->mac.type != e1000_82583))
|
||||
adapter->stats.tncrs += er32(TNCRS);
|
||||
if ((hw->phy.type == e1000_phy_82578) ||
|
||||
(hw->phy.type == e1000_phy_82577)) {
|
||||
e1e_rphy(hw, HV_TNCRS_UPPER, &phy_data);
|
||||
e1e_rphy(hw, HV_TNCRS_LOWER, &phy_data);
|
||||
adapter->stats.tncrs += phy_data;
|
||||
} else {
|
||||
if ((hw->mac.type != e1000_82574) &&
|
||||
(hw->mac.type != e1000_82583))
|
||||
adapter->stats.tncrs += er32(TNCRS);
|
||||
}
|
||||
adapter->stats.cexterr += er32(CEXTERR);
|
||||
adapter->stats.tsctc += er32(TSCTC);
|
||||
adapter->stats.tsctfc += er32(TSCTFC);
|
||||
|
@@ -3854,7 +3916,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
buffer_info->length = size;
buffer_info->time_stamp = jiffies;
buffer_info->next_to_watch = i;
buffer_info->dma = map[0] + offset;
buffer_info->dma = skb_shinfo(skb)->dma_head + offset;
count++;

len -= size;
@@ -3885,7 +3947,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
buffer_info->length = size;
buffer_info->time_stamp = jiffies;
buffer_info->next_to_watch = i;
buffer_info->dma = map[f + 1] + offset;
buffer_info->dma = map[f] + offset;

len -= size;
offset += size;
@@ -4149,7 +4211,6 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
count = e1000_tx_map(adapter, skb, first, max_per_txd, nr_frags, mss);
if (count) {
e1000_tx_queue(adapter, tx_flags, count);
netdev->trans_start = jiffies;
/* Make sure there is space in the ring for the next send. */
e1000_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 2);
@@ -4210,27 +4271,17 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
struct e1000_adapter *adapter = netdev_priv(netdev);
int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;

if ((new_mtu < ETH_ZLEN + ETH_FCS_LEN + VLAN_HLEN) ||
(max_frame > MAX_JUMBO_FRAME_SIZE)) {
e_err("Invalid MTU setting\n");
/* Jumbo frame support */
if ((max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) &&
!(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
e_err("Jumbo Frames not supported.\n");
return -EINVAL;
}

/* Jumbo frame size limits */
if (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) {
if (!(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
e_err("Jumbo Frames not supported.\n");
return -EINVAL;
}
if (adapter->hw.phy.type == e1000_phy_ife) {
e_err("Jumbo Frames not supported.\n");
return -EINVAL;
}
}

#define MAX_STD_JUMBO_FRAME_SIZE 9234
if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
e_err("MTU > 9216 not supported.\n");
/* Supported frame sizes */
if ((new_mtu < ETH_ZLEN + ETH_FCS_LEN + VLAN_HLEN) ||
(max_frame > adapter->max_hw_frame_size)) {
e_err("Unsupported MTU setting\n");
return -EINVAL;
}
@@ -4350,6 +4401,81 @@ static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
}
}

static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc)
{
struct e1000_hw *hw = &adapter->hw;
u32 i, mac_reg;
u16 phy_reg;
int retval = 0;

/* copy MAC RARs to PHY RARs */
for (i = 0; i < adapter->hw.mac.rar_entry_count; i++) {
mac_reg = er32(RAL(i));
e1e_wphy(hw, BM_RAR_L(i), (u16)(mac_reg & 0xFFFF));
e1e_wphy(hw, BM_RAR_M(i), (u16)((mac_reg >> 16) & 0xFFFF));
mac_reg = er32(RAH(i));
e1e_wphy(hw, BM_RAR_H(i), (u16)(mac_reg & 0xFFFF));
e1e_wphy(hw, BM_RAR_CTRL(i), (u16)((mac_reg >> 16) & 0xFFFF));
}

/* copy MAC MTA to PHY MTA */
for (i = 0; i < adapter->hw.mac.mta_reg_count; i++) {
mac_reg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i);
e1e_wphy(hw, BM_MTA(i), (u16)(mac_reg & 0xFFFF));
e1e_wphy(hw, BM_MTA(i) + 1, (u16)((mac_reg >> 16) & 0xFFFF));
}

/* configure PHY Rx Control register */
e1e_rphy(&adapter->hw, BM_RCTL, &phy_reg);
mac_reg = er32(RCTL);
if (mac_reg & E1000_RCTL_UPE)
phy_reg |= BM_RCTL_UPE;
if (mac_reg & E1000_RCTL_MPE)
phy_reg |= BM_RCTL_MPE;
phy_reg &= ~(BM_RCTL_MO_MASK);
if (mac_reg & E1000_RCTL_MO_3)
phy_reg |= (((mac_reg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT)
<< BM_RCTL_MO_SHIFT);
if (mac_reg & E1000_RCTL_BAM)
phy_reg |= BM_RCTL_BAM;
if (mac_reg & E1000_RCTL_PMCF)
phy_reg |= BM_RCTL_PMCF;
mac_reg = er32(CTRL);
if (mac_reg & E1000_CTRL_RFCE)
phy_reg |= BM_RCTL_RFCE;
e1e_wphy(&adapter->hw, BM_RCTL, phy_reg);

/* enable PHY wakeup in MAC register */
ew32(WUFC, wufc);
ew32(WUC, E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN);

/* configure and enable PHY wakeup in PHY registers */
e1e_wphy(&adapter->hw, BM_WUFC, wufc);
e1e_wphy(&adapter->hw, BM_WUC, E1000_WUC_PME_EN);

/* activate PHY wakeup */
retval = hw->phy.ops.acquire_phy(hw);
if (retval) {
e_err("Could not acquire PHY\n");
return retval;
}
e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
(BM_WUC_ENABLE_PAGE << IGP_PAGE_SHIFT));
retval = e1000e_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, &phy_reg);
if (retval) {
e_err("Could not read PHY page 769\n");
goto out;
}
phy_reg |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
retval = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, phy_reg);
if (retval)
e_err("Could not set PHY Host Wakeup bit\n");
out:
hw->phy.ops.release_phy(hw);

return retval;
}

static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
struct net_device *netdev = pci_get_drvdata(pdev);
@@ -4392,8 +4518,9 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
#define E1000_CTRL_ADVD3WUC 0x00100000
/* phy power management enable */
#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
ctrl |= E1000_CTRL_ADVD3WUC |
E1000_CTRL_EN_PHY_PWR_MGMT;
ctrl |= E1000_CTRL_ADVD3WUC;
if (!(adapter->flags2 & FLAG2_HAS_PHY_WAKEUP))
ctrl |= E1000_CTRL_EN_PHY_PWR_MGMT;
ew32(CTRL, ctrl);

if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
@@ -4411,8 +4538,17 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
/* Allow time for pending master requests to run */
e1000e_disable_pcie_master(&adapter->hw);

ew32(WUC, E1000_WUC_PME_EN);
ew32(WUFC, wufc);
if ((adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) &&
!(hw->mac.ops.check_mng_mode(hw))) {
/* enable wakeup by the PHY */
retval = e1000_init_phy_wakeup(adapter, wufc);
if (retval)
return retval;
} else {
/* enable wakeup by the MAC */
ew32(WUFC, wufc);
ew32(WUC, E1000_WUC_PME_EN);
}
} else {
ew32(WUC, 0);
ew32(WUFC, 0);
@@ -4555,8 +4691,37 @@ static int e1000_resume(struct pci_dev *pdev)
}

e1000e_power_up_phy(adapter);

/* report the system wakeup cause from S3/S4 */
if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {
u16 phy_data;

e1e_rphy(&adapter->hw, BM_WUS, &phy_data);
if (phy_data) {
e_info("PHY Wakeup cause - %s\n",
phy_data & E1000_WUS_EX ? "Unicast Packet" :
phy_data & E1000_WUS_MC ? "Multicast Packet" :
phy_data & E1000_WUS_BC ? "Broadcast Packet" :
phy_data & E1000_WUS_MAG ? "Magic Packet" :
phy_data & E1000_WUS_LNKC ? "Link Status "
" Change" : "other");
}
e1e_wphy(&adapter->hw, BM_WUS, ~0);
} else {
u32 wus = er32(WUS);
if (wus) {
e_info("MAC Wakeup cause - %s\n",
wus & E1000_WUS_EX ? "Unicast Packet" :
wus & E1000_WUS_MC ? "Multicast Packet" :
wus & E1000_WUS_BC ? "Broadcast Packet" :
wus & E1000_WUS_MAG ? "Magic Packet" :
wus & E1000_WUS_LNKC ? "Link Status Change" :
"other");
}
ew32(WUS, ~0);
}

e1000e_reset(adapter);
ew32(WUS, ~0);

e1000_init_manageability(adapter);
@@ -4846,6 +5011,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
adapter->flags2 = ei->flags2;
adapter->hw.adapter = adapter;
adapter->hw.mac.type = ei->mac;
adapter->max_hw_frame_size = ei->max_hw_frame_size;
adapter->msg_enable = (1 << NETIF_MSG_DRV | NETIF_MSG_PROBE) - 1;

mmio_start = pci_resource_start(pdev, 0);
@@ -5001,6 +5167,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
/* APME bit in EEPROM is mapped to WUC.APME */
eeprom_data = er32(WUC);
eeprom_apme_mask = E1000_WUC_APME;
if (eeprom_data & E1000_WUC_PHY_WAKE)
adapter->flags2 |= FLAG2_HAS_PHY_WAKEUP;
} else if (adapter->flags & FLAG_APME_IN_CTRL3) {
if (adapter->flags & FLAG_APME_CHECK_PORT_B &&
(adapter->hw.bus.func == 1))
@@ -5202,6 +5370,11 @@ static struct pci_device_id e1000_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LM), board_ich10lan },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LF), board_ich10lan },

{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LM), board_pchlan },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LC), board_pchlan },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DM), board_pchlan },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DC), board_pchlan },

{ } /* terminate list */
};
MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);

@@ -427,6 +427,8 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
e1000_validate_option(&crc_stripping, &opt, adapter);
if (crc_stripping == OPTION_ENABLED)
adapter->flags2 |= FLAG2_CRC_STRIPPING;
} else {
adapter->flags2 |= FLAG2_CRC_STRIPPING;
}
}
{ /* Kumeran Lock Loss Workaround */

@@ -37,6 +37,9 @@ static s32 e1000_wait_autoneg(struct e1000_hw *hw);
static u32 e1000_get_phy_addr_for_bm_page(u32 page, u32 reg);
static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
u16 *data, bool read);
static u32 e1000_get_phy_addr_for_hv_page(u32 page);
static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset,
u16 *data, bool read);

/* Cable length tables */
static const u16 e1000_m88_cable_length_table[] =
@ -54,6 +57,55 @@ static const u16 e1000_igp_2_cable_length_table[] =
|
|||
#define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \
|
||||
ARRAY_SIZE(e1000_igp_2_cable_length_table)
|
||||
|
||||
#define BM_PHY_REG_PAGE(offset) \
|
||||
((u16)(((offset) >> PHY_PAGE_SHIFT) & 0xFFFF))
|
||||
#define BM_PHY_REG_NUM(offset) \
|
||||
((u16)(((offset) & MAX_PHY_REG_ADDRESS) |\
|
||||
(((offset) >> (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT)) &\
|
||||
~MAX_PHY_REG_ADDRESS)))
|
||||
|
||||
#define HV_INTC_FC_PAGE_START 768
|
||||
#define I82578_ADDR_REG 29
|
||||
#define I82577_ADDR_REG 16
|
||||
#define I82577_CFG_REG 22
|
||||
#define I82577_CFG_ASSERT_CRS_ON_TX (1 << 15)
|
||||
#define I82577_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift 100/10 */
|
||||
#define I82577_CTRL_REG 23
|
||||
#define I82577_CTRL_DOWNSHIFT_MASK (7 << 10)
|
||||
|
||||
/* 82577 specific PHY registers */
|
||||
#define I82577_PHY_CTRL_2 18
|
||||
#define I82577_PHY_STATUS_2 26
|
||||
#define I82577_PHY_DIAG_STATUS 31
|
||||
|
||||
/* I82577 PHY Status 2 */
|
||||
#define I82577_PHY_STATUS2_REV_POLARITY 0x0400
|
||||
#define I82577_PHY_STATUS2_MDIX 0x0800
|
||||
#define I82577_PHY_STATUS2_SPEED_MASK 0x0300
|
||||
#define I82577_PHY_STATUS2_SPEED_1000MBPS 0x0200
|
||||
|
||||
/* I82577 PHY Control 2 */
|
||||
#define I82577_PHY_CTRL2_AUTO_MDIX 0x0400
|
||||
#define I82577_PHY_CTRL2_FORCE_MDI_MDIX 0x0200
|
||||
|
||||
/* I82577 PHY Diagnostics Status */
|
||||
#define I82577_DSTATUS_CABLE_LENGTH 0x03FC
|
||||
#define I82577_DSTATUS_CABLE_LENGTH_SHIFT 2
|
||||
|
||||
/* BM PHY Copper Specific Control 1 */
|
||||
#define BM_CS_CTRL1 16
|
||||
|
||||
/* BM PHY Copper Specific Status */
|
||||
#define BM_CS_STATUS 17
|
||||
#define BM_CS_STATUS_LINK_UP 0x0400
|
||||
#define BM_CS_STATUS_RESOLVED 0x0800
|
||||
#define BM_CS_STATUS_SPEED_MASK 0xC000
|
||||
#define BM_CS_STATUS_SPEED_1000 0x8000
|
||||
|
||||
#define HV_MUX_DATA_CTRL PHY_REG(776, 16)
|
||||
#define HV_MUX_DATA_CTRL_GEN_TO_MAC 0x0400
|
||||
#define HV_MUX_DATA_CTRL_FORCE_SPEED 0x0004
|
||||
|
||||
/**
|
||||
* e1000e_check_reset_block_generic - Check if PHY reset is blocked
|
||||
* @hw: pointer to the HW structure
|
||||
|
@ -82,23 +134,48 @@ s32 e1000e_check_reset_block_generic(struct e1000_hw *hw)
|
|||
s32 e1000e_get_phy_id(struct e1000_hw *hw)
|
||||
{
|
||||
struct e1000_phy_info *phy = &hw->phy;
|
||||
s32 ret_val;
|
||||
s32 ret_val = 0;
|
||||
u16 phy_id;
|
||||
u16 retry_count = 0;
|
||||
|
||||
ret_val = e1e_rphy(hw, PHY_ID1, &phy_id);
|
||||
if (ret_val)
|
||||
return ret_val;
|
||||
if (!(phy->ops.read_phy_reg))
|
||||
goto out;
|
||||
|
||||
phy->id = (u32)(phy_id << 16);
|
||||
udelay(20);
|
||||
ret_val = e1e_rphy(hw, PHY_ID2, &phy_id);
|
||||
if (ret_val)
|
||||
return ret_val;
|
||||
while (retry_count < 2) {
|
||||
ret_val = e1e_rphy(hw, PHY_ID1, &phy_id);
|
||||
if (ret_val)
|
||||
goto out;
|
||||
|
||||
phy->id |= (u32)(phy_id & PHY_REVISION_MASK);
|
||||
phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK);
|
||||
phy->id = (u32)(phy_id << 16);
|
||||
udelay(20);
|
||||
ret_val = e1e_rphy(hw, PHY_ID2, &phy_id);
|
||||
if (ret_val)
|
||||
goto out;
|
||||
|
||||
return 0;
|
||||
phy->id |= (u32)(phy_id & PHY_REVISION_MASK);
|
||||
phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK);
|
||||
|
||||
if (phy->id != 0 && phy->id != PHY_REVISION_MASK)
|
||||
goto out;
|
||||
|
||||
/*
|
||||
* If the PHY ID is still unknown, we may have an 82577i
|
||||
* without link. We will try again after setting Slow
|
||||
* MDIC mode. No harm in trying again in this case since
|
||||
* the PHY ID is unknown at this point anyway
|
||||
*/
|
||||
ret_val = e1000_set_mdio_slow_mode_hv(hw, true);
|
||||
if (ret_val)
|
||||
goto out;
|
||||
|
||||
retry_count++;
|
||||
}
|
||||
out:
|
||||
/* Revert to MDIO fast mode, if applicable */
|
||||
if (retry_count)
|
||||
ret_val = e1000_set_mdio_slow_mode_hv(hw, false);
|
||||
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -409,6 +486,43 @@ s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data)
|
|||
return ret_val;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_copper_link_setup_82577 - Setup 82577 PHY for copper link
|
||||
* @hw: pointer to the HW structure
|
||||
*
|
||||
* Sets up Carrier-sense on Transmit and downshift values.
|
||||
**/
|
||||
s32 e1000_copper_link_setup_82577(struct e1000_hw *hw)
|
||||
{
|
||||
struct e1000_phy_info *phy = &hw->phy;
|
||||
s32 ret_val;
|
||||
u16 phy_data;
|
||||
|
||||
/* Enable CRS on TX. This must be set for half-duplex operation. */
|
||||
ret_val = phy->ops.read_phy_reg(hw, I82577_CFG_REG, &phy_data);
|
||||
if (ret_val)
|
||||
goto out;
|
||||
|
||||
phy_data |= I82577_CFG_ASSERT_CRS_ON_TX;
|
||||
|
||||
/* Enable downshift */
|
||||
phy_data |= I82577_CFG_ENABLE_DOWNSHIFT;
|
||||
|
||||
ret_val = phy->ops.write_phy_reg(hw, I82577_CFG_REG, phy_data);
|
||||
if (ret_val)
|
||||
goto out;
|
||||
|
||||
/* Set number of link attempts before downshift */
|
||||
ret_val = phy->ops.read_phy_reg(hw, I82577_CTRL_REG, &phy_data);
|
||||
if (ret_val)
|
||||
goto out;
|
||||
phy_data &= ~I82577_CTRL_DOWNSHIFT_MASK;
|
||||
ret_val = phy->ops.write_phy_reg(hw, I82577_CTRL_REG, phy_data);
|
||||
|
||||
out:
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000e_copper_link_setup_m88 - Setup m88 PHY's for copper link
|
||||
* @hw: pointer to the HW structure
|
||||
|
@ -427,8 +541,8 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw)
|
|||
if (ret_val)
|
||||
return ret_val;
|
||||
|
||||
/* For newer PHYs this bit is downshift enable */
|
||||
if (phy->type == e1000_phy_m88)
|
||||
/* For BM PHY this bit is downshift enable */
|
||||
if (phy->type != e1000_phy_bm)
|
||||
phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
|
||||
|
||||
/*
|
||||
|
@ -520,10 +634,27 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw)
|
|||
|
||||
/* Commit the changes. */
|
||||
ret_val = e1000e_commit_phy(hw);
|
||||
if (ret_val)
|
||||
if (ret_val) {
|
||||
hw_dbg(hw, "Error committing the PHY changes\n");
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
return ret_val;
|
||||
if (phy->type == e1000_phy_82578) {
|
||||
ret_val = phy->ops.read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
|
||||
&phy_data);
|
||||
if (ret_val)
|
||||
return ret_val;
|
||||
|
||||
/* 82578 PHY - set the downshift count to 1x. */
|
||||
phy_data |= I82578_EPSCR_DOWNSHIFT_ENABLE;
|
||||
phy_data &= ~I82578_EPSCR_DOWNSHIFT_COUNTER_MASK;
|
||||
ret_val = phy->ops.write_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
|
||||
phy_data);
|
||||
if (ret_val)
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -1251,6 +1382,8 @@ s32 e1000e_check_downshift(struct e1000_hw *hw)
|
|||
switch (phy->type) {
|
||||
case e1000_phy_m88:
|
||||
case e1000_phy_gg82563:
|
||||
case e1000_phy_82578:
|
||||
case e1000_phy_82577:
|
||||
offset = M88E1000_PHY_SPEC_STATUS;
|
||||
mask = M88E1000_PSSR_DOWNSHIFT;
|
||||
break;
|
||||
|
@ -1886,6 +2019,12 @@ enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id)
|
|||
case BME1000_E_PHY_ID_R2:
|
||||
phy_type = e1000_phy_bm;
|
||||
break;
|
||||
case I82578_E_PHY_ID:
|
||||
phy_type = e1000_phy_82578;
|
||||
break;
|
||||
case I82577_E_PHY_ID:
|
||||
phy_type = e1000_phy_82577;
|
||||
break;
|
||||
default:
|
||||
phy_type = e1000_phy_unknown;
|
||||
break;
|
||||
|
@ -2181,11 +2320,16 @@ static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
|
|||
u16 *data, bool read)
|
||||
{
|
||||
s32 ret_val;
|
||||
u16 reg = ((u16)offset) & PHY_REG_MASK;
|
||||
u16 reg = BM_PHY_REG_NUM(offset);
|
||||
u16 phy_reg = 0;
|
||||
u8 phy_acquired = 1;
|
||||
|
||||
|
||||
/* Gig must be disabled for MDIO accesses to page 800 */
|
||||
if ((hw->mac.type == e1000_pchlan) &&
|
||||
(!(er32(PHY_CTRL) & E1000_PHY_CTRL_GBE_DISABLE)))
|
||||
hw_dbg(hw, "Attempting to access page 800 while gig enabled\n");
|
||||
|
||||
ret_val = hw->phy.ops.acquire_phy(hw);
|
||||
if (ret_val) {
|
||||
phy_acquired = 0;
|
||||
|
@ -2289,3 +2433,524 @@ static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active)
|
|||
|
||||
return 0;
|
||||
}
|
||||
|
||||
s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw, bool slow)
|
||||
{
|
||||
s32 ret_val = 0;
|
||||
u16 data = 0;
|
||||
|
||||
ret_val = hw->phy.ops.acquire_phy(hw);
|
||||
if (ret_val)
|
||||
return ret_val;
|
||||
|
||||
/* Set MDIO mode - page 769, register 16: 0x2580==slow, 0x2180==fast */
|
||||
hw->phy.addr = 1;
|
||||
ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
|
||||
(BM_PORT_CTRL_PAGE << IGP_PAGE_SHIFT));
|
||||
if (ret_val) {
|
||||
hw->phy.ops.release_phy(hw);
|
||||
return ret_val;
|
||||
}
|
||||
ret_val = e1000e_write_phy_reg_mdic(hw, BM_CS_CTRL1,
|
||||
(0x2180 | (slow << 10)));
|
||||
|
||||
/* dummy read when reverting to fast mode - throw away result */
|
||||
if (!slow)
|
||||
e1000e_read_phy_reg_mdic(hw, BM_CS_CTRL1, &data);
|
||||
|
||||
hw->phy.ops.release_phy(hw);
|
||||
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_read_phy_reg_hv - Read HV PHY register
|
||||
* @hw: pointer to the HW structure
|
||||
* @offset: register offset to be read
|
||||
* @data: pointer to the read data
|
||||
*
|
||||
* Acquires semaphore, if necessary, then reads the PHY register at offset
|
||||
* and storing the retrieved information in data. Release any acquired
|
||||
* semaphore before exiting.
|
||||
**/
|
||||
s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data)
|
||||
{
|
||||
s32 ret_val;
|
||||
u16 page = BM_PHY_REG_PAGE(offset);
|
||||
u16 reg = BM_PHY_REG_NUM(offset);
|
||||
bool in_slow_mode = false;
|
||||
|
||||
/* Workaround failure in MDIO access while cable is disconnected */
|
||||
if ((hw->phy.type == e1000_phy_82577) &&
|
||||
!(er32(STATUS) & E1000_STATUS_LU)) {
|
||||
ret_val = e1000_set_mdio_slow_mode_hv(hw, true);
|
||||
if (ret_val)
|
||||
goto out;
|
||||
|
||||
in_slow_mode = true;
|
||||
}
|
||||
|
||||
/* Page 800 works differently than the rest so it has its own func */
|
||||
if (page == BM_WUC_PAGE) {
|
||||
ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset,
|
||||
data, true);
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (page > 0 && page < HV_INTC_FC_PAGE_START) {
|
||||
ret_val = e1000_access_phy_debug_regs_hv(hw, offset,
|
||||
data, true);
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret_val = hw->phy.ops.acquire_phy(hw);
|
||||
if (ret_val)
|
||||
goto out;
|
||||
|
||||
hw->phy.addr = e1000_get_phy_addr_for_hv_page(page);
|
||||
|
||||
if (page == HV_INTC_FC_PAGE_START)
|
||||
page = 0;
|
||||
|
||||
if (reg > MAX_PHY_MULTI_PAGE_REG) {
|
||||
if ((hw->phy.type != e1000_phy_82578) ||
|
||||
((reg != I82578_ADDR_REG) &&
|
||||
(reg != I82578_ADDR_REG + 1))) {
|
||||
u32 phy_addr = hw->phy.addr;
|
||||
|
||||
hw->phy.addr = 1;
|
||||
|
||||
/* Page is shifted left, PHY expects (page x 32) */
|
||||
ret_val = e1000e_write_phy_reg_mdic(hw,
|
||||
IGP01E1000_PHY_PAGE_SELECT,
|
||||
(page << IGP_PAGE_SHIFT));
|
||||
if (ret_val) {
|
||||
hw->phy.ops.release_phy(hw);
|
||||
goto out;
|
||||
}
|
||||
hw->phy.addr = phy_addr;
|
||||
}
|
||||
}
|
||||
|
||||
ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg,
|
||||
data);
|
||||
hw->phy.ops.release_phy(hw);
|
||||
|
||||
out:
|
||||
/* Revert to MDIO fast mode, if applicable */
|
||||
if ((hw->phy.type == e1000_phy_82577) && in_slow_mode)
|
||||
ret_val = e1000_set_mdio_slow_mode_hv(hw, false);
|
||||
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_write_phy_reg_hv - Write HV PHY register
|
||||
* @hw: pointer to the HW structure
|
||||
* @offset: register offset to write to
|
||||
* @data: data to write at register offset
|
||||
*
|
||||
* Acquires semaphore, if necessary, then writes the data to PHY register
|
||||
* at the offset. Release any acquired semaphores before exiting.
|
||||
**/
|
||||
s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data)
|
||||
{
|
||||
s32 ret_val;
|
||||
u16 page = BM_PHY_REG_PAGE(offset);
|
||||
u16 reg = BM_PHY_REG_NUM(offset);
|
||||
bool in_slow_mode = false;
|
||||
|
||||
/* Workaround failure in MDIO access while cable is disconnected */
|
||||
if ((hw->phy.type == e1000_phy_82577) &&
|
||||
!(er32(STATUS) & E1000_STATUS_LU)) {
|
||||
ret_val = e1000_set_mdio_slow_mode_hv(hw, true);
|
||||
if (ret_val)
|
||||
goto out;
|
||||
|
||||
in_slow_mode = true;
|
||||
}
|
||||
|
||||
/* Page 800 works differently than the rest so it has its own func */
|
||||
if (page == BM_WUC_PAGE) {
|
||||
ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset,
|
||||
&data, false);
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (page > 0 && page < HV_INTC_FC_PAGE_START) {
|
||||
ret_val = e1000_access_phy_debug_regs_hv(hw, offset,
|
||||
&data, false);
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret_val = hw->phy.ops.acquire_phy(hw);
|
||||
if (ret_val)
|
||||
goto out;
|
||||
|
||||
hw->phy.addr = e1000_get_phy_addr_for_hv_page(page);
|
||||
|
||||
if (page == HV_INTC_FC_PAGE_START)
|
||||
page = 0;
|
||||
|
||||
/*
|
||||
* Workaround MDIO accesses being disabled after entering IEEE Power
|
||||
* Down (whenever bit 11 of the PHY Control register is set)
|
||||
*/
|
||||
if ((hw->phy.type == e1000_phy_82578) &&
|
||||
(hw->phy.revision >= 1) &&
|
||||
(hw->phy.addr == 2) &&
|
||||
((MAX_PHY_REG_ADDRESS & reg) == 0) &&
|
||||
(data & (1 << 11))) {
|
||||
u16 data2 = 0x7EFF;
|
||||
hw->phy.ops.release_phy(hw);
|
||||
ret_val = e1000_access_phy_debug_regs_hv(hw, (1 << 6) | 0x3,
|
||||
&data2, false);
|
||||
if (ret_val)
|
||||
goto out;
|
||||
|
||||
ret_val = hw->phy.ops.acquire_phy(hw);
|
||||
if (ret_val)
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (reg > MAX_PHY_MULTI_PAGE_REG) {
|
||||
if ((hw->phy.type != e1000_phy_82578) ||
|
||||
((reg != I82578_ADDR_REG) &&
|
||||
(reg != I82578_ADDR_REG + 1))) {
|
||||
u32 phy_addr = hw->phy.addr;
|
||||
|
||||
hw->phy.addr = 1;
|
||||
|
||||
/* Page is shifted left, PHY expects (page x 32) */
|
||||
ret_val = e1000e_write_phy_reg_mdic(hw,
|
||||
IGP01E1000_PHY_PAGE_SELECT,
|
||||
(page << IGP_PAGE_SHIFT));
|
||||
if (ret_val) {
|
||||
hw->phy.ops.release_phy(hw);
|
||||
goto out;
|
||||
}
|
||||
hw->phy.addr = phy_addr;
|
||||
}
|
||||
}
|
||||
|
||||
ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg,
|
||||
data);
|
||||
hw->phy.ops.release_phy(hw);
|
||||
|
||||
out:
|
||||
/* Revert to MDIO fast mode, if applicable */
|
||||
if ((hw->phy.type == e1000_phy_82577) && in_slow_mode)
|
||||
ret_val = e1000_set_mdio_slow_mode_hv(hw, false);
|
||||
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_get_phy_addr_for_hv_page - Get PHY address based on page
|
||||
* @page: page to be accessed
|
||||
**/
|
||||
static u32 e1000_get_phy_addr_for_hv_page(u32 page)
|
||||
{
|
||||
u32 phy_addr = 2;
|
||||
|
||||
if (page >= HV_INTC_FC_PAGE_START)
|
||||
phy_addr = 1;
|
||||
|
||||
return phy_addr;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_access_phy_debug_regs_hv - Read HV PHY vendor specific high registers
|
||||
* @hw: pointer to the HW structure
|
||||
* @offset: register offset to be read or written
|
||||
* @data: pointer to the data to be read or written
|
||||
* @read: determines if operation is read or write
|
||||
*
|
||||
* Acquires semaphore, if necessary, then reads the PHY register at offset
|
||||
* and storing the retrieved information in data. Release any acquired
|
||||
* semaphores before exiting. Note that the procedure to read these regs
|
||||
* uses the address port and data port to read/write.
|
||||
**/
|
||||
static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset,
|
||||
u16 *data, bool read)
|
||||
{
|
||||
s32 ret_val;
|
||||
u32 addr_reg = 0;
|
||||
u32 data_reg = 0;
|
||||
u8 phy_acquired = 1;
|
||||
|
||||
/* This takes care of the difference with desktop vs mobile phy */
|
||||
addr_reg = (hw->phy.type == e1000_phy_82578) ?
|
||||
I82578_ADDR_REG : I82577_ADDR_REG;
|
||||
data_reg = addr_reg + 1;
|
||||
|
||||
ret_val = hw->phy.ops.acquire_phy(hw);
|
||||
if (ret_val) {
|
||||
hw_dbg(hw, "Could not acquire PHY\n");
|
||||
phy_acquired = 0;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* All operations in this function are phy address 2 */
|
||||
hw->phy.addr = 2;
|
||||
|
||||
/* masking with 0x3F to remove the page from offset */
|
||||
ret_val = e1000e_write_phy_reg_mdic(hw, addr_reg, (u16)offset & 0x3F);
|
||||
if (ret_val) {
|
||||
hw_dbg(hw, "Could not write PHY the HV address register\n");
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* Read or write the data value next */
|
||||
if (read)
|
||||
ret_val = e1000e_read_phy_reg_mdic(hw, data_reg, data);
|
||||
else
|
||||
ret_val = e1000e_write_phy_reg_mdic(hw, data_reg, *data);
|
||||
|
||||
if (ret_val) {
|
||||
hw_dbg(hw, "Could not read data value from HV data register\n");
|
||||
goto out;
|
||||
}
|
||||
|
||||
out:
|
||||
if (phy_acquired == 1)
|
||||
hw->phy.ops.release_phy(hw);
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_link_stall_workaround_hv - Si workaround
|
||||
* @hw: pointer to the HW structure
|
||||
*
|
||||
* This function works around a Si bug where the link partner can get
|
||||
* a link up indication before the PHY does. If small packets are sent
|
||||
* by the link partner they can be placed in the packet buffer without
|
||||
* being properly accounted for by the PHY and will stall preventing
|
||||
* further packets from being received. The workaround is to clear the
|
||||
* packet buffer after the PHY detects link up.
|
||||
**/
|
||||
s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw)
|
||||
{
|
||||
s32 ret_val = 0;
|
||||
u16 data;
|
||||
|
||||
if (hw->phy.type != e1000_phy_82578)
|
||||
goto out;
|
||||
|
||||
/* check if link is up and at 1Gbps */
|
||||
ret_val = hw->phy.ops.read_phy_reg(hw, BM_CS_STATUS, &data);
|
||||
if (ret_val)
|
||||
goto out;
|
||||
|
||||
data &= BM_CS_STATUS_LINK_UP |
|
||||
BM_CS_STATUS_RESOLVED |
|
||||
BM_CS_STATUS_SPEED_MASK;
|
||||
|
||||
if (data != (BM_CS_STATUS_LINK_UP |
|
||||
BM_CS_STATUS_RESOLVED |
|
||||
BM_CS_STATUS_SPEED_1000))
|
||||
goto out;
|
||||
|
||||
mdelay(200);
|
||||
|
||||
/* flush the packets in the fifo buffer */
|
||||
ret_val = hw->phy.ops.write_phy_reg(hw, HV_MUX_DATA_CTRL,
|
||||
HV_MUX_DATA_CTRL_GEN_TO_MAC |
|
||||
HV_MUX_DATA_CTRL_FORCE_SPEED);
|
||||
if (ret_val)
|
||||
goto out;
|
||||
|
||||
ret_val = hw->phy.ops.write_phy_reg(hw, HV_MUX_DATA_CTRL,
|
||||
HV_MUX_DATA_CTRL_GEN_TO_MAC);
|
||||
|
||||
out:
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_check_polarity_82577 - Checks the polarity.
|
||||
* @hw: pointer to the HW structure
|
||||
*
|
||||
* Success returns 0, Failure returns -E1000_ERR_PHY (-2)
|
||||
*
|
||||
* Polarity is determined based on the PHY specific status register.
|
||||
**/
|
||||
s32 e1000_check_polarity_82577(struct e1000_hw *hw)
|
||||
{
|
||||
struct e1000_phy_info *phy = &hw->phy;
|
||||
s32 ret_val;
|
||||
u16 data;
|
||||
|
||||
ret_val = phy->ops.read_phy_reg(hw, I82577_PHY_STATUS_2, &data);
|
||||
|
||||
if (!ret_val)
|
||||
phy->cable_polarity = (data & I82577_PHY_STATUS2_REV_POLARITY)
|
||||
? e1000_rev_polarity_reversed
|
||||
: e1000_rev_polarity_normal;
|
||||
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_phy_force_speed_duplex_82577 - Force speed/duplex for I82577 PHY
|
||||
* @hw: pointer to the HW structure
|
||||
*
|
||||
* Calls the PHY setup function to force speed and duplex. Clears the
|
||||
* auto-crossover to force MDI manually. Waits for link and returns
|
||||
* successful if link up is successful, else -E1000_ERR_PHY (-2).
|
||||
**/
|
||||
s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw)
|
||||
{
|
||||
struct e1000_phy_info *phy = &hw->phy;
|
||||
s32 ret_val;
|
||||
u16 phy_data;
|
||||
bool link;
|
||||
|
||||
ret_val = phy->ops.read_phy_reg(hw, PHY_CONTROL, &phy_data);
|
||||
if (ret_val)
|
||||
goto out;
|
||||
|
||||
e1000e_phy_force_speed_duplex_setup(hw, &phy_data);
|
||||
|
||||
ret_val = phy->ops.write_phy_reg(hw, PHY_CONTROL, phy_data);
|
||||
if (ret_val)
|
||||
goto out;
|
||||
|
||||
/*
|
||||
* Clear Auto-Crossover to force MDI manually. 82577 requires MDI
|
||||
* forced whenever speed and duplex are forced.
|
||||
*/
|
||||
ret_val = phy->ops.read_phy_reg(hw, I82577_PHY_CTRL_2, &phy_data);
|
||||
if (ret_val)
|
||||
goto out;
|
||||
|
||||
phy_data &= ~I82577_PHY_CTRL2_AUTO_MDIX;
|
||||
phy_data &= ~I82577_PHY_CTRL2_FORCE_MDI_MDIX;
|
||||
|
||||
ret_val = phy->ops.write_phy_reg(hw, I82577_PHY_CTRL_2, phy_data);
|
||||
if (ret_val)
|
||||
goto out;
|
||||
|
||||
hw_dbg(hw, "I82577_PHY_CTRL_2: %X\n", phy_data);
|
||||
|
||||
udelay(1);
|
||||
|
||||
if (phy->autoneg_wait_to_complete) {
|
||||
hw_dbg(hw, "Waiting for forced speed/duplex link on 82577 phy\n");
|
||||
|
||||
ret_val = e1000e_phy_has_link_generic(hw,
|
||||
PHY_FORCE_LIMIT,
|
||||
100000,
|
||||
&link);
|
||||
if (ret_val)
|
||||
goto out;
|
||||
|
||||
if (!link)
|
||||
hw_dbg(hw, "Link taking longer than expected.\n");
|
||||
|
||||
/* Try once more */
|
||||
ret_val = e1000e_phy_has_link_generic(hw,
|
||||
PHY_FORCE_LIMIT,
|
||||
100000,
|
||||
&link);
|
||||
if (ret_val)
|
||||
goto out;
|
||||
}
|
||||
|
||||
out:
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_get_phy_info_82577 - Retrieve I82577 PHY information
|
||||
* @hw: pointer to the HW structure
|
||||
*
|
||||
* Read PHY status to determine if link is up. If link is up, then
|
||||
* set/determine 10base-T extended distance and polarity correction. Read
|
||||
* PHY port status to determine MDI/MDIx and speed. Based on the speed,
|
||||
* determine on the cable length, local and remote receiver.
|
||||
**/
|
||||
s32 e1000_get_phy_info_82577(struct e1000_hw *hw)
|
||||
{
|
||||
struct e1000_phy_info *phy = &hw->phy;
|
||||
s32 ret_val;
|
||||
u16 data;
|
||||
bool link;
|
||||
|
||||
ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
|
||||
if (ret_val)
|
||||
goto out;
|
||||
|
||||
if (!link) {
|
||||
hw_dbg(hw, "Phy info is only valid if link is up\n");
|
||||
ret_val = -E1000_ERR_CONFIG;
|
||||
goto out;
|
||||
}
|
||||
|
||||
phy->polarity_correction = true;
|
||||
|
||||
ret_val = e1000_check_polarity_82577(hw);
|
||||
if (ret_val)
|
||||
goto out;
|
||||
|
||||
ret_val = phy->ops.read_phy_reg(hw, I82577_PHY_STATUS_2, &data);
|
||||
if (ret_val)
|
||||
goto out;
|
||||
|
||||
phy->is_mdix = (data & I82577_PHY_STATUS2_MDIX) ? true : false;
|
||||
|
||||
if ((data & I82577_PHY_STATUS2_SPEED_MASK) ==
|
||||
I82577_PHY_STATUS2_SPEED_1000MBPS) {
|
||||
ret_val = hw->phy.ops.get_cable_length(hw);
|
||||
if (ret_val)
|
||||
goto out;
|
||||
|
||||
ret_val = phy->ops.read_phy_reg(hw, PHY_1000T_STATUS, &data);
|
||||
if (ret_val)
|
||||
goto out;
|
||||
|
||||
phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS)
|
||||
? e1000_1000t_rx_status_ok
|
||||
: e1000_1000t_rx_status_not_ok;
|
||||
|
||||
phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS)
|
||||
? e1000_1000t_rx_status_ok
|
||||
: e1000_1000t_rx_status_not_ok;
|
||||
} else {
|
||||
phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
|
||||
phy->local_rx = e1000_1000t_rx_status_undefined;
|
||||
phy->remote_rx = e1000_1000t_rx_status_undefined;
|
||||
}
|
||||
|
||||
out:
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_get_cable_length_82577 - Determine cable length for 82577 PHY
|
||||
* @hw: pointer to the HW structure
|
||||
*
|
||||
* Reads the diagnostic status register and verifies result is valid before
|
||||
* placing it in the phy_cable_length field.
|
||||
**/
|
||||
s32 e1000_get_cable_length_82577(struct e1000_hw *hw)
|
||||
{
|
||||
struct e1000_phy_info *phy = &hw->phy;
|
||||
s32 ret_val;
|
||||
u16 phy_data, length;
|
||||
|
||||
ret_val = phy->ops.read_phy_reg(hw, I82577_PHY_DIAG_STATUS, &phy_data);
|
||||
if (ret_val)
|
||||
goto out;
|
||||
|
||||
length = (phy_data & I82577_DSTATUS_CABLE_LENGTH) >>
|
||||
I82577_DSTATUS_CABLE_LENGTH_SHIFT;
|
||||
|
||||
if (length == E1000_CABLE_LENGTH_UNDEFINED)
|
||||
ret_val = E1000_ERR_PHY;
|
||||
|
||||
phy->cable_length = length;
|
||||
|
||||
out:
|
||||
return ret_val;
|
||||
}
|
||||
|
|
|
@@ -661,8 +661,6 @@ static int enic_hard_start_xmit(struct sk_buff *skb, struct net_device *netdev)
if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + 1)
netif_stop_queue(netdev);

netdev->trans_start = jiffies;

spin_unlock_irqrestore(&enic->wq_lock[0], flags);

return NETDEV_TX_OK;

@ -77,27 +77,31 @@
|
|||
* Hardware access:
|
||||
*/
|
||||
|
||||
#define DEV_NEED_TIMERIRQ 0x000001 /* set the timer irq flag in the irq mask */
|
||||
#define DEV_NEED_LINKTIMER 0x000002 /* poll link settings. Relies on the timer irq */
|
||||
#define DEV_HAS_LARGEDESC 0x000004 /* device supports jumbo frames and needs packet format 2 */
|
||||
#define DEV_HAS_HIGH_DMA 0x000008 /* device supports 64bit dma */
|
||||
#define DEV_HAS_CHECKSUM 0x000010 /* device supports tx and rx checksum offloads */
|
||||
#define DEV_HAS_VLAN 0x000020 /* device supports vlan tagging and striping */
|
||||
#define DEV_HAS_MSI 0x000040 /* device supports MSI */
|
||||
#define DEV_HAS_MSI_X 0x000080 /* device supports MSI-X */
|
||||
#define DEV_HAS_POWER_CNTRL 0x000100 /* device supports power savings */
|
||||
#define DEV_HAS_STATISTICS_V1 0x000200 /* device supports hw statistics version 1 */
|
||||
#define DEV_HAS_STATISTICS_V2 0x000600 /* device supports hw statistics version 2 */
|
||||
#define DEV_HAS_STATISTICS_V3 0x000e00 /* device supports hw statistics version 3 */
|
||||
#define DEV_HAS_TEST_EXTENDED 0x001000 /* device supports extended diagnostic test */
|
||||
#define DEV_HAS_MGMT_UNIT 0x002000 /* device supports management unit */
|
||||
#define DEV_HAS_CORRECT_MACADDR 0x004000 /* device supports correct mac address order */
|
||||
#define DEV_HAS_COLLISION_FIX 0x008000 /* device supports tx collision fix */
|
||||
#define DEV_HAS_PAUSEFRAME_TX_V1 0x010000 /* device supports tx pause frames version 1 */
|
||||
#define DEV_HAS_PAUSEFRAME_TX_V2 0x020000 /* device supports tx pause frames version 2 */
|
||||
#define DEV_HAS_PAUSEFRAME_TX_V3 0x040000 /* device supports tx pause frames version 3 */
|
||||
#define DEV_NEED_TX_LIMIT 0x080000 /* device needs to limit tx */
|
||||
#define DEV_HAS_GEAR_MODE 0x100000 /* device supports gear mode */
|
||||
#define DEV_NEED_TIMERIRQ 0x0000001 /* set the timer irq flag in the irq mask */
|
||||
#define DEV_NEED_LINKTIMER 0x0000002 /* poll link settings. Relies on the timer irq */
|
||||
#define DEV_HAS_LARGEDESC 0x0000004 /* device supports jumbo frames and needs packet format 2 */
|
||||
#define DEV_HAS_HIGH_DMA 0x0000008 /* device supports 64bit dma */
|
||||
#define DEV_HAS_CHECKSUM 0x0000010 /* device supports tx and rx checksum offloads */
|
||||
#define DEV_HAS_VLAN 0x0000020 /* device supports vlan tagging and striping */
|
||||
#define DEV_HAS_MSI 0x0000040 /* device supports MSI */
|
||||
#define DEV_HAS_MSI_X 0x0000080 /* device supports MSI-X */
|
||||
#define DEV_HAS_POWER_CNTRL 0x0000100 /* device supports power savings */
|
||||
#define DEV_HAS_STATISTICS_V1 0x0000200 /* device supports hw statistics version 1 */
|
||||
#define DEV_HAS_STATISTICS_V2 0x0000600 /* device supports hw statistics version 2 */
|
||||
#define DEV_HAS_STATISTICS_V3 0x0000e00 /* device supports hw statistics version 3 */
|
||||
#define DEV_HAS_TEST_EXTENDED 0x0001000 /* device supports extended diagnostic test */
|
||||
#define DEV_HAS_MGMT_UNIT 0x0002000 /* device supports management unit */
|
||||
#define DEV_HAS_CORRECT_MACADDR 0x0004000 /* device supports correct mac address order */
|
||||
#define DEV_HAS_COLLISION_FIX 0x0008000 /* device supports tx collision fix */
|
||||
#define DEV_HAS_PAUSEFRAME_TX_V1 0x0010000 /* device supports tx pause frames version 1 */
|
||||
#define DEV_HAS_PAUSEFRAME_TX_V2 0x0020000 /* device supports tx pause frames version 2 */
|
||||
#define DEV_HAS_PAUSEFRAME_TX_V3 0x0040000 /* device supports tx pause frames version 3 */
|
||||
#define DEV_NEED_TX_LIMIT 0x0080000 /* device needs to limit tx */
|
||||
#define DEV_NEED_TX_LIMIT2 0x0180000 /* device needs to limit tx, except for some revs */
|
||||
#define DEV_HAS_GEAR_MODE 0x0200000 /* device supports gear mode */
|
||||
#define DEV_NEED_PHY_INIT_FIX 0x0400000 /* device needs specific phy workaround */
|
||||
#define DEV_NEED_LOW_POWER_FIX 0x0800000 /* device needs special power up workaround */
|
||||
#define DEV_NEED_MSI_FIX 0x1000000 /* device needs msi workaround */
|
||||
|
||||
enum {
|
||||
NvRegIrqStatus = 0x000,
|
||||
|
@@ -898,6 +902,12 @@ enum {
};
static int phy_cross = NV_CROSSOVER_DETECTION_DISABLED;

/*
* Power down phy when interface is down (persists through reboot;
* older Linux and other OSes may not power it up again)
*/
static int phy_power_down = 0;

static inline struct fe_priv *get_nvpriv(struct net_device *dev)
{
return netdev_priv(dev);
@@ -1265,14 +1275,7 @@ static int phy_init(struct net_device *dev)
}
}
if (np->phy_model == PHY_MODEL_REALTEK_8201) {
if (np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_32 ||
np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_33 ||
np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_34 ||
np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_35 ||
np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_36 ||
np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_37 ||
np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_38 ||
np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_39) {
if (np->driver_data & DEV_NEED_PHY_INIT_FIX) {
phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
phy_reserved |= PHY_REALTEK_INIT7;
if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, phy_reserved)) {
@@ -1463,14 +1466,7 @@ static int phy_init(struct net_device *dev)
}
}
if (np->phy_model == PHY_MODEL_REALTEK_8201) {
if (np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_32 ||
np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_33 ||
np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_34 ||
np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_35 ||
np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_36 ||
np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_37 ||
np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_38 ||
np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_39) {
if (np->driver_data & DEV_NEED_PHY_INIT_FIX) {
phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
phy_reserved |= PHY_REALTEK_INIT7;
if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, phy_reserved)) {
@@ -1503,7 +1499,10 @@ static int phy_init(struct net_device *dev)

/* restart auto negotiation, power down phy */
mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE | BMCR_PDOWN);
mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
if (phy_power_down) {
mii_control |= BMCR_PDOWN;
}
if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
return PHY_ERROR;
}
@@ -5534,7 +5533,7 @@ static int nv_close(struct net_device *dev)

nv_drain_rxtx(dev);

if (np->wolenabled) {
if (np->wolenabled || !phy_power_down) {
nv_txrx_gate(dev, false);
writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
nv_start_rx(dev);
@@ -5835,8 +5834,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
/* take phy and nic out of low power mode */
powerstate = readl(base + NvRegPowerState2);
powerstate &= ~NVREG_POWERSTATE2_POWERUP_MASK;
if ((id->device == PCI_DEVICE_ID_NVIDIA_NVENET_12 ||
id->device == PCI_DEVICE_ID_NVIDIA_NVENET_13) &&
if ((id->driver_data & DEV_NEED_LOW_POWER_FIX) &&
pci_dev->revision >= 0xA3)
powerstate |= NVREG_POWERSTATE2_POWERUP_REV_A3;
writel(powerstate, base + NvRegPowerState2);
@@ -5892,14 +5890,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
/* Limit the number of tx's outstanding for hw bug */
if (id->driver_data & DEV_NEED_TX_LIMIT) {
np->tx_limit = 1;
if ((id->device == PCI_DEVICE_ID_NVIDIA_NVENET_32 ||
id->device == PCI_DEVICE_ID_NVIDIA_NVENET_33 ||
id->device == PCI_DEVICE_ID_NVIDIA_NVENET_34 ||
id->device == PCI_DEVICE_ID_NVIDIA_NVENET_35 ||
id->device == PCI_DEVICE_ID_NVIDIA_NVENET_36 ||
id->device == PCI_DEVICE_ID_NVIDIA_NVENET_37 ||
id->device == PCI_DEVICE_ID_NVIDIA_NVENET_38 ||
id->device == PCI_DEVICE_ID_NVIDIA_NVENET_39) &&
if ((id->driver_data & DEV_NEED_TX_LIMIT2) &&
pci_dev->revision >= 0xA2)
np->tx_limit = 0;
}
@@ -6149,7 +6140,8 @@ static int nv_resume(struct pci_dev *pdev)
for (i = 0;i <= np->register_size/sizeof(u32); i++)
writel(np->saved_config_space[i], base+i*sizeof(u32));

pci_write_config_dword(pdev, NV_MSI_PRIV_OFFSET, NV_MSI_PRIV_VALUE);
if (np->driver_data & DEV_NEED_MSI_FIX)
pci_write_config_dword(pdev, NV_MSI_PRIV_OFFSET, NV_MSI_PRIV_VALUE);

/* restore phy state, including autoneg */
phy_init(dev);
@ -6198,160 +6190,164 @@ static void nv_shutdown(struct pci_dev *pdev)
|
|||
|
||||
static struct pci_device_id pci_tbl[] = {
|
||||
{ /* nForce Ethernet Controller */
|
||||
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_1),
|
||||
PCI_DEVICE(0x10DE, 0x01C3),
|
||||
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
|
||||
},
|
||||
{ /* nForce2 Ethernet Controller */
|
||||
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_2),
|
||||
PCI_DEVICE(0x10DE, 0x0066),
|
||||
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
|
||||
},
|
||||
{ /* nForce3 Ethernet Controller */
|
||||
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_3),
|
||||
PCI_DEVICE(0x10DE, 0x00D6),
|
||||
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
|
||||
},
|
||||
{ /* nForce3 Ethernet Controller */
|
||||
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_4),
|
||||
PCI_DEVICE(0x10DE, 0x0086),
|
||||
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
|
||||
},
|
||||
{ /* nForce3 Ethernet Controller */
|
||||
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_5),
|
||||
PCI_DEVICE(0x10DE, 0x008C),
|
||||
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
|
||||
},
|
||||
{ /* nForce3 Ethernet Controller */
|
||||
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_6),
|
||||
PCI_DEVICE(0x10DE, 0x00E6),
|
||||
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
|
||||
},
|
||||
{ /* nForce3 Ethernet Controller */
|
||||
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_7),
|
||||
PCI_DEVICE(0x10DE, 0x00DF),
|
||||
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
|
||||
},
|
||||
{ /* CK804 Ethernet Controller */
|
||||
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_8),
|
||||
PCI_DEVICE(0x10DE, 0x0056),
|
||||
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
|
||||
},
|
||||
{ /* CK804 Ethernet Controller */
|
||||
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_9),
|
||||
PCI_DEVICE(0x10DE, 0x0057),
|
||||
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
|
||||
},
|
||||
{ /* MCP04 Ethernet Controller */
|
||||
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_10),
|
||||
PCI_DEVICE(0x10DE, 0x0037),
|
||||
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
|
||||
},
|
||||
{ /* MCP04 Ethernet Controller */
|
||||
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_11),
|
||||
PCI_DEVICE(0x10DE, 0x0038),
|
||||
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
|
||||
},
|
||||
{ /* MCP51 Ethernet Controller */
|
||||
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_12),
|
||||
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1,
|
||||
PCI_DEVICE(0x10DE, 0x0268),
|
||||
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1|DEV_NEED_LOW_POWER_FIX,
|
||||
},
|
||||
{ /* MCP51 Ethernet Controller */
|
||||
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_13),
|
||||
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1,
|
||||
PCI_DEVICE(0x10DE, 0x0269),
|
||||
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1|DEV_NEED_LOW_POWER_FIX,
|
||||
},
|
||||
{ /* MCP55 Ethernet Controller */
|
||||
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14),
|
||||
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT,
|
||||
PCI_DEVICE(0x10DE, 0x0372),
|
||||
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT|DEV_NEED_MSI_FIX,
|
||||
},
|
||||
{ /* MCP55 Ethernet Controller */
|
||||
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15),
|
||||
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT,
|
||||
PCI_DEVICE(0x10DE, 0x0373),
|
||||
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT|DEV_NEED_MSI_FIX,
|
||||
},
|
||||
{ /* MCP61 Ethernet Controller */
|
||||
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_16),
|
||||
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
|
||||
PCI_DEVICE(0x10DE, 0x03E5),
|
||||
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
|
||||
},
|
||||
{ /* MCP61 Ethernet Controller */
|
||||
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_17),
|
||||
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
|
||||
PCI_DEVICE(0x10DE, 0x03E6),
|
||||
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
|
||||
},
|
||||
{ /* MCP61 Ethernet Controller */
|
||||
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_18),
|
||||
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
|
||||
PCI_DEVICE(0x10DE, 0x03EE),
|
||||
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
|
||||
},
|
||||
{ /* MCP61 Ethernet Controller */
|
||||
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_19),
|
||||
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
|
||||
PCI_DEVICE(0x10DE, 0x03EF),
|
||||
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
|
||||
},
|
||||
{ /* MCP65 Ethernet Controller */
|
||||
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_20),
|
||||
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
|
||||
PCI_DEVICE(0x10DE, 0x0450),
|
||||
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
|
||||
},
|
||||
{ /* MCP65 Ethernet Controller */
|
||||
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_21),
|
||||
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
|
||||
PCI_DEVICE(0x10DE, 0x0451),
|
||||
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
|
||||
},
|
||||
{ /* MCP65 Ethernet Controller */
|
||||
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_22),
|
||||
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
|
||||
PCI_DEVICE(0x10DE, 0x0452),
|
||||
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
|
||||
},
|
||||
{ /* MCP65 Ethernet Controller */
|
||||
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_23),
|
||||
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
PCI_DEVICE(0x10DE, 0x0453),
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
},
{ /* MCP67 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_24),
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE,
PCI_DEVICE(0x10DE, 0x054C),
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
},
{ /* MCP67 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_25),
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE,
PCI_DEVICE(0x10DE, 0x054D),
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
},
{ /* MCP67 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_26),
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE,
PCI_DEVICE(0x10DE, 0x054E),
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
},
{ /* MCP67 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_27),
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE,
PCI_DEVICE(0x10DE, 0x054F),
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
},
{ /* MCP73 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_28),
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE,
PCI_DEVICE(0x10DE, 0x07DC),
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
},
{ /* MCP73 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_29),
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE,
PCI_DEVICE(0x10DE, 0x07DD),
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
},
{ /* MCP73 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_30),
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE,
PCI_DEVICE(0x10DE, 0x07DE),
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
},
{ /* MCP73 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_31),
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE,
PCI_DEVICE(0x10DE, 0x07DF),
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
},
{ /* MCP77 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_32),
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
PCI_DEVICE(0x10DE, 0x0760),
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
},
{ /* MCP77 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_33),
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
PCI_DEVICE(0x10DE, 0x0761),
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
},
{ /* MCP77 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_34),
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
PCI_DEVICE(0x10DE, 0x0762),
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
},
{ /* MCP77 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_35),
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
PCI_DEVICE(0x10DE, 0x0763),
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
},
{ /* MCP79 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_36),
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
PCI_DEVICE(0x10DE, 0x0AB0),
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
},
{ /* MCP79 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_37),
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
PCI_DEVICE(0x10DE, 0x0AB1),
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
},
{ /* MCP79 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_38),
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
PCI_DEVICE(0x10DE, 0x0AB2),
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
},
{ /* MCP79 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_39),
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
PCI_DEVICE(0x10DE, 0x0AB3),
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
},
{ /* MCP89 Ethernet Controller */
PCI_DEVICE(0x10DE, 0x0D7D),
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX,
},
{0,},
};
@@ -6390,6 +6386,8 @@ module_param(dma_64bit, int, 0);
MODULE_PARM_DESC(dma_64bit, "High DMA is enabled by setting to 1 and disabled by setting to 0.");
module_param(phy_cross, int, 0);
MODULE_PARM_DESC(phy_cross, "Phy crossover detection for Realtek 8201 phy is enabled by setting to 1 and disabled by setting to 0.");
module_param(phy_power_down, int, 0);
MODULE_PARM_DESC(phy_power_down, "Power down phy and disable link when interface is down (1), or leave phy powered up (0).");

MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>");
MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
@@ -301,13 +301,17 @@ static int fsl_pq_mdio_probe(struct of_device *ofdev,
of_device_is_compatible(np, "ucc_geth_phy")) {
#ifdef CONFIG_UCC_GETH
u32 id;
static u32 mii_mng_master;

tbipa = &regs->utbipar;

if ((err = get_ucc_id_for_range(addr, addr + size, &id)))
goto err_free_irqs;

ucc_set_qe_mux_mii_mng(id - 1);
if (!mii_mng_master) {
mii_mng_master = id;
ucc_set_qe_mux_mii_mng(id - 1);
}
#else
err = -ENODEV;
goto err_free_irqs;
@@ -259,7 +259,7 @@ extern const char gfar_driver_version[];
(IEVENT_RXC | IEVENT_BSY | IEVENT_EBERR | IEVENT_MSRO | \
IEVENT_BABT | IEVENT_TXC | IEVENT_TXE | IEVENT_LC \
| IEVENT_CRL | IEVENT_XFUN | IEVENT_DPE | IEVENT_PERR \
| IEVENT_MAG)
| IEVENT_MAG | IEVENT_BABR)

#define IMASK_INIT_CLEAR 0x00000000
#define IMASK_BABR 0x80000000
@@ -1163,7 +1163,7 @@ static void hamachi_tx_timeout(struct net_device *dev)
hmp->rx_ring[RX_RING_SIZE-1].status_n_length |= cpu_to_le32(DescEndRing);

/* Trigger an immediate transmit demand. */
dev->trans_start = jiffies;
dev->trans_start = jiffies; /* prevent tx timeout */
hmp->stats.tx_errors++;

/* Restart the chip's Tx/Rx processes . */

@@ -1364,7 +1364,6 @@ static int hamachi_start_xmit(struct sk_buff *skb, struct net_device *dev)
hmp->tx_full = 1;
netif_stop_queue(dev);
}
dev->trans_start = jiffies;

if (hamachi_debug > 4) {
printk(KERN_DEBUG "%s: Hamachi transmit frame #%d queued in slot %d.\n",
@@ -3139,8 +3139,7 @@ static inline int igb_tx_map_adv(struct igb_adapter *adapter,
/* set time_stamp *before* dma to help avoid a possible race */
buffer_info->time_stamp = jiffies;
buffer_info->next_to_watch = i;
buffer_info->dma = map[count];
count++;
buffer_info->dma = skb_shinfo(skb)->dma_head;

for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
struct skb_frag_struct *frag;

@@ -3164,7 +3163,7 @@ static inline int igb_tx_map_adv(struct igb_adapter *adapter,
tx_ring->buffer_info[i].skb = skb;
tx_ring->buffer_info[first].next_to_watch = i;

return count;
return count + 1;
}

static inline void igb_tx_queue_adv(struct igb_adapter *adapter,

@@ -3344,7 +3343,6 @@ static int igb_xmit_frame_ring_adv(struct sk_buff *skb,
if (count) {
igb_tx_queue_adv(adapter, tx_ring, tx_flags, count,
skb->len, hdr_len);
netdev->trans_start = jiffies;
/* Make sure there is space in the ring for the next send. */
igb_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 4);
} else {
@@ -2119,8 +2119,7 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
/* set time_stamp *before* dma to help avoid a possible race */
buffer_info->time_stamp = jiffies;
buffer_info->next_to_watch = i;
buffer_info->dma = map[count];
count++;
buffer_info->dma = skb_shinfo(skb)->dma_head;

for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
struct skb_frag_struct *frag;

@@ -2144,7 +2143,7 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
tx_ring->buffer_info[i].skb = skb;
tx_ring->buffer_info[first].next_to_watch = i;

return count;
return count + 1;
}

static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter,

@@ -2270,7 +2269,6 @@ static int igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
if (count) {
igbvf_tx_queue_adv(adapter, tx_ring, tx_flags, count,
skb->len, hdr_len);
netdev->trans_start = jiffies;
/* Make sure there is space in the ring for the next send. */
igbvf_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 4);
} else {
@@ -1859,6 +1859,42 @@ static void irda_usb_disconnect(struct usb_interface *intf)
IRDA_DEBUG(0, "%s(), USB IrDA Disconnected\n", __func__);
}

#ifdef CONFIG_PM
/* USB suspend, so power off the transmitter/receiver */
static int irda_usb_suspend(struct usb_interface *intf, pm_message_t message)
{
struct irda_usb_cb *self = usb_get_intfdata(intf);
int i;

netif_device_detach(self->netdev);

if (self->tx_urb != NULL)
usb_kill_urb(self->tx_urb);
if (self->speed_urb != NULL)
usb_kill_urb(self->speed_urb);
for (i = 0; i < self->max_rx_urb; i++) {
if (self->rx_urb[i] != NULL)
usb_kill_urb(self->rx_urb[i]);
}
return 0;
}

/* Coming out of suspend, so reset hardware */
static int irda_usb_resume(struct usb_interface *intf)
{
struct irda_usb_cb *self = usb_get_intfdata(intf);
int i;

for (i = 0; i < self->max_rx_urb; i++) {
if (self->rx_urb[i] != NULL)
usb_submit_urb(self->rx_urb[i], GFP_KERNEL);
}

netif_device_attach(self->netdev);
return 0;
}
#endif

/*------------------------------------------------------------------*/
/*
 * USB device callbacks

@@ -1868,6 +1904,10 @@ static struct usb_driver irda_driver = {
.probe = irda_usb_probe,
.disconnect = irda_usb_disconnect,
.id_table = dongles,
#ifdef CONFIG_PM
.suspend = irda_usb_suspend,
.resume = irda_usb_resume,
#endif
};

/************************* MODULE CALLBACKS *************************/
@@ -1300,7 +1300,7 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
buffer_info->length = size;
WARN_ON(buffer_info->dma != 0);
buffer_info->time_stamp = jiffies;
buffer_info->dma = map[0] + offset;
buffer_info->dma = skb_shinfo(skb)->dma_head + offset;
pci_map_single(adapter->pdev,
skb->data + offset,
size,

@@ -1340,7 +1340,7 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,

buffer_info->length = size;
buffer_info->time_stamp = jiffies;
buffer_info->dma = map[f + 1] + offset;
buffer_info->dma = map[f] + offset;
buffer_info->next_to_watch = 0;

len -= size;

@@ -1488,7 +1488,6 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)

if (count) {
ixgb_tx_queue(adapter, count, vlan_id, tx_flags);
netdev->trans_start = jiffies;
/* Make sure there is space in the ring for the next send. */
ixgb_maybe_stop_tx(netdev, &adapter->tx_ring, DESC_NEEDED);
@@ -121,17 +121,18 @@ struct ixgbe_queue_stats {

struct ixgbe_ring {
void *desc; /* descriptor ring memory */
dma_addr_t dma; /* phys. address of descriptor ring */
unsigned int size; /* length in bytes */
unsigned int count; /* amount of descriptors */
unsigned int next_to_use;
unsigned int next_to_clean;

int queue_index; /* needed for multiqueue queue management */
union {
struct ixgbe_tx_buffer *tx_buffer_info;
struct ixgbe_rx_buffer *rx_buffer_info;
};
u8 atr_sample_rate;
u8 atr_count;
u16 count; /* amount of descriptors */
u16 rx_buf_len;
u16 next_to_use;
u16 next_to_clean;

u8 queue_index; /* needed for multiqueue queue management */

u16 head;
u16 tail;

@@ -139,23 +140,24 @@ struct ixgbe_ring {
unsigned int total_bytes;
unsigned int total_packets;

u16 reg_idx; /* holds the special value that gets the hardware register
* offset associated with this ring, which is different
* for DCB and RSS modes */

#ifdef CONFIG_IXGBE_DCA
/* cpu for tx queue */
int cpu;
#endif

u16 work_limit; /* max work per interrupt */
u16 reg_idx; /* holds the special value that gets
* the hardware register offset
* associated with this ring, which is
* different for DCB and RSS modes
*/

struct ixgbe_queue_stats stats;
u64 v_idx; /* maps directly to the index for this ring in the hardware
* vector array, can also be used for finding the bit in EICR
* and friends that represents the vector for this ring */
unsigned long reinit_state;
u64 rsc_count; /* stat for coalesced packets */


u16 work_limit; /* max work per interrupt */
u16 rx_buf_len;
u64 rsc_count; /* stat for coalesced packets */
unsigned int size; /* length in bytes */
dma_addr_t dma; /* phys. address of descriptor ring */
};

enum ixgbe_ring_f_enum {

@@ -163,6 +165,7 @@ enum ixgbe_ring_f_enum {
RING_F_DCB,
RING_F_VMDQ,
RING_F_RSS,
RING_F_FDIR,
#ifdef IXGBE_FCOE
RING_F_FCOE,
#endif /* IXGBE_FCOE */

@@ -173,6 +176,7 @@ enum ixgbe_ring_f_enum {
#define IXGBE_MAX_DCB_INDICES 8
#define IXGBE_MAX_RSS_INDICES 16
#define IXGBE_MAX_VMDQ_INDICES 16
#define IXGBE_MAX_FDIR_INDICES 64
#ifdef IXGBE_FCOE
#define IXGBE_MAX_FCOE_INDICES 8
#endif /* IXGBE_FCOE */

@@ -193,6 +197,9 @@ struct ixgbe_ring_feature {
*/
struct ixgbe_q_vector {
struct ixgbe_adapter *adapter;
unsigned int v_idx; /* index of q_vector within array, also used for
* finding the bit in EICR and friends that
* represents the vector for this ring */
struct napi_struct napi;
DECLARE_BITMAP(rxr_idx, MAX_RX_QUEUES); /* Rx ring indices */
DECLARE_BITMAP(txr_idx, MAX_TX_QUEUES); /* Tx ring indices */

@@ -201,7 +208,6 @@ struct ixgbe_q_vector {
u8 tx_itr;
u8 rx_itr;
u32 eitr;
u32 v_idx; /* vector index in list */
};

/* Helper macros to switch between ints/sec and what the register uses.

@@ -223,6 +229,10 @@ struct ixgbe_q_vector {
#define IXGBE_TX_CTXTDESC_ADV(R, i) \
(&(((struct ixgbe_adv_tx_context_desc *)((R).desc))[i]))

#define IXGBE_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i]))
#define IXGBE_TX_DESC(R, i) IXGBE_GET_DESC(R, i, ixgbe_legacy_tx_desc)
#define IXGBE_RX_DESC(R, i) IXGBE_GET_DESC(R, i, ixgbe_legacy_rx_desc)

#define IXGBE_MAX_JUMBO_FRAME_SIZE 16128
#ifdef IXGBE_FCOE
/* Use 3K as the baby jumbo frame size for FCoE */

@@ -315,10 +325,13 @@ struct ixgbe_adapter {
#define IXGBE_FLAG_IN_WATCHDOG_TASK (u32)(1 << 23)
#define IXGBE_FLAG_IN_SFP_LINK_TASK (u32)(1 << 24)
#define IXGBE_FLAG_IN_SFP_MOD_TASK (u32)(1 << 25)
#define IXGBE_FLAG_RSC_CAPABLE (u32)(1 << 26)
#define IXGBE_FLAG_RSC_ENABLED (u32)(1 << 27)
#define IXGBE_FLAG_FDIR_HASH_CAPABLE (u32)(1 << 26)
#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE (u32)(1 << 27)
#define IXGBE_FLAG_FCOE_ENABLED (u32)(1 << 29)

u32 flags2;
#define IXGBE_FLAG2_RSC_CAPABLE (u32)(1)
#define IXGBE_FLAG2_RSC_ENABLED (u32)(1 << 1)
/* default to trying for four seconds */
#define IXGBE_TRY_LINK_TIMEOUT (4 * HZ)

@@ -327,6 +340,10 @@ struct ixgbe_adapter {
struct pci_dev *pdev;
struct net_device_stats net_stats;

u32 test_icr;
struct ixgbe_ring test_tx_ring;
struct ixgbe_ring test_rx_ring;

/* structs defined in ixgbe_hw.h */
struct ixgbe_hw hw;
u16 msg_enable;

@@ -349,6 +366,10 @@ struct ixgbe_adapter {
struct timer_list sfp_timer;
struct work_struct multispeed_fiber_task;
struct work_struct sfp_config_module_task;
u32 fdir_pballoc;
u32 atr_sample_rate;
spinlock_t fdir_perfect_lock;
struct work_struct fdir_reinit_task;
#ifdef IXGBE_FCOE
struct ixgbe_fcoe fcoe;
#endif /* IXGBE_FCOE */

@@ -361,6 +382,7 @@ enum ixbge_state_t {
__IXGBE_TESTING,
__IXGBE_RESETTING,
__IXGBE_DOWN,
__IXGBE_FDIR_INIT_DONE,
__IXGBE_SFP_MODULE_NOT_FOUND
};

@@ -393,7 +415,63 @@ extern void ixgbe_free_tx_resources(struct ixgbe_adapter *, struct ixgbe_ring *)
extern void ixgbe_update_stats(struct ixgbe_adapter *adapter);
extern int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
extern void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter);
extern void ixgbe_write_eitr(struct ixgbe_adapter *, int, u32);
extern void ixgbe_write_eitr(struct ixgbe_q_vector *);
extern int ethtool_ioctl(struct ifreq *ifr);
extern s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
extern s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc);
extern s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc);
extern s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
struct ixgbe_atr_input *input,
u8 queue);
extern s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
struct ixgbe_atr_input *input,
u16 soft_id,
u8 queue);
extern u16 ixgbe_atr_compute_hash_82599(struct ixgbe_atr_input *input, u32 key);
extern s32 ixgbe_atr_set_vlan_id_82599(struct ixgbe_atr_input *input,
u16 vlan_id);
extern s32 ixgbe_atr_set_src_ipv4_82599(struct ixgbe_atr_input *input,
u32 src_addr);
extern s32 ixgbe_atr_set_dst_ipv4_82599(struct ixgbe_atr_input *input,
u32 dst_addr);
extern s32 ixgbe_atr_set_src_ipv6_82599(struct ixgbe_atr_input *input,
u32 src_addr_1, u32 src_addr_2,
u32 src_addr_3, u32 src_addr_4);
extern s32 ixgbe_atr_set_dst_ipv6_82599(struct ixgbe_atr_input *input,
u32 dst_addr_1, u32 dst_addr_2,
u32 dst_addr_3, u32 dst_addr_4);
extern s32 ixgbe_atr_set_src_port_82599(struct ixgbe_atr_input *input,
u16 src_port);
extern s32 ixgbe_atr_set_dst_port_82599(struct ixgbe_atr_input *input,
u16 dst_port);
extern s32 ixgbe_atr_set_flex_byte_82599(struct ixgbe_atr_input *input,
u16 flex_byte);
extern s32 ixgbe_atr_set_vm_pool_82599(struct ixgbe_atr_input *input,
u8 vm_pool);
extern s32 ixgbe_atr_set_l4type_82599(struct ixgbe_atr_input *input,
u8 l4type);
extern s32 ixgbe_atr_get_vlan_id_82599(struct ixgbe_atr_input *input,
u16 *vlan_id);
extern s32 ixgbe_atr_get_src_ipv4_82599(struct ixgbe_atr_input *input,
u32 *src_addr);
extern s32 ixgbe_atr_get_dst_ipv4_82599(struct ixgbe_atr_input *input,
u32 *dst_addr);
extern s32 ixgbe_atr_get_src_ipv6_82599(struct ixgbe_atr_input *input,
u32 *src_addr_1, u32 *src_addr_2,
u32 *src_addr_3, u32 *src_addr_4);
extern s32 ixgbe_atr_get_dst_ipv6_82599(struct ixgbe_atr_input *input,
u32 *dst_addr_1, u32 *dst_addr_2,
u32 *dst_addr_3, u32 *dst_addr_4);
extern s32 ixgbe_atr_get_src_port_82599(struct ixgbe_atr_input *input,
u16 *src_port);
extern s32 ixgbe_atr_get_dst_port_82599(struct ixgbe_atr_input *input,
u16 *dst_port);
extern s32 ixgbe_atr_get_flex_byte_82599(struct ixgbe_atr_input *input,
u16 *flex_byte);
extern s32 ixgbe_atr_get_vm_pool_82599(struct ixgbe_atr_input *input,
u8 *vm_pool);
extern s32 ixgbe_atr_get_l4type_82599(struct ixgbe_atr_input *input,
u8 *l4type);
#ifdef IXGBE_FCOE
extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
extern int ixgbe_fso(struct ixgbe_adapter *adapter,