Automatic merge with /usr/src/ntfs-2.6.git

This commit is contained in:
Anton Altaparmakov 2005-06-08 15:45:45 +01:00
commit 364f6c717d
172 changed files with 5280 additions and 2314 deletions


@ -14,7 +14,7 @@
</authorgroup>
<copyright>
<year>2003</year>
<year>2003-2005</year>
<holder>Jeff Garzik</holder>
</copyright>
@ -44,30 +44,38 @@
<toc></toc>
<chapter id="libataThanks">
<title>Thanks</title>
<chapter id="libataIntroduction">
<title>Introduction</title>
<para>
The bulk of the ATA knowledge comes thanks to long conversations with
Andre Hedrick (www.linux-ide.org).
libATA is a library used inside the Linux kernel to support ATA host
controllers and devices. libATA provides an ATA driver API, class
transports for ATA and ATAPI devices, and SCSI&lt;-&gt;ATA translation
for ATA devices according to the T10 SAT specification.
</para>
<para>
Thanks to Alan Cox for pointing out similarities
between SATA and SCSI, and in general for motivation to hack on
libata.
</para>
<para>
libata's device detection
method, ata_pio_devchk, and in general all the early probing was
based on extensive study of Hale Landis's probe/reset code in his
ATADRVR driver (www.ata-atapi.com).
This Guide documents the libATA driver API, library functions, library
internals, and a couple sample ATA low-level drivers.
</para>
</chapter>
<chapter id="libataDriverApi">
<title>libata Driver API</title>
<para>
struct ata_port_operations is defined for every low-level libata
hardware driver, and it controls how the low-level driver
interfaces with the ATA and SCSI layers.
</para>
<para>
FIS-based drivers will hook into the system with ->qc_prep() and
->qc_issue() high-level hooks. Hardware which behaves in a manner
similar to PCI IDE hardware may utilize several generic helpers,
defining at a bare minimum the bus I/O addresses of the ATA shadow
register blocks.
</para>
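<para>
As an illustration (not part of the original document), a conventional
PCI IDE-style driver might fill in struct ata_port_operations almost
entirely with the generic libata helpers; my_phy_reset() here is a
hypothetical driver-supplied hook:
</para>
<programlisting>
static struct ata_port_operations my_ops = {
	.port_disable	= ata_port_disable,
	.tf_load	= ata_tf_load,
	.tf_read	= ata_tf_read,
	.check_status	= ata_check_status,
	.exec_command	= ata_exec_command,
	.dev_select	= ata_std_dev_select,
	.phy_reset	= my_phy_reset,		/* hypothetical driver hook */
	.bmdma_setup	= ata_bmdma_setup,
	.bmdma_start	= ata_bmdma_start,
	.qc_prep	= ata_qc_prep,
	.qc_issue	= ata_qc_issue_prot,
	.irq_handler	= ata_interrupt,
	.port_start	= ata_port_start,
	.port_stop	= ata_port_stop,
};
</programlisting>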
<sect1>
<title>struct ata_port_operations</title>
<sect2><title>Disable ATA port</title>
<programlisting>
void (*port_disable) (struct ata_port *);
</programlisting>
@ -78,6 +86,9 @@ void (*port_disable) (struct ata_port *);
unplug).
</para>
</sect2>
<sect2><title>Post-IDENTIFY device configuration</title>
<programlisting>
void (*dev_config) (struct ata_port *, struct ata_device *);
</programlisting>
@ -88,6 +99,9 @@ void (*dev_config) (struct ata_port *, struct ata_device *);
issue of SET FEATURES - XFER MODE, and prior to operation.
</para>
</sect2>
<sect2><title>Set PIO/DMA mode</title>
<programlisting>
void (*set_piomode) (struct ata_port *, struct ata_device *);
void (*set_dmamode) (struct ata_port *, struct ata_device *);
@ -108,6 +122,9 @@ void (*post_set_mode) (struct ata_port *ap);
->set_dma_mode() is only called if DMA is possible.
</para>
</sect2>
<sect2><title>Taskfile read/write</title>
<programlisting>
void (*tf_load) (struct ata_port *ap, struct ata_taskfile *tf);
void (*tf_read) (struct ata_port *ap, struct ata_taskfile *tf);
@ -120,6 +137,9 @@ void (*tf_read) (struct ata_port *ap, struct ata_taskfile *tf);
taskfile register values.
</para>
</sect2>
<sect2><title>ATA command execute</title>
<programlisting>
void (*exec_command)(struct ata_port *ap, struct ata_taskfile *tf);
</programlisting>
@ -129,17 +149,37 @@ void (*exec_command)(struct ata_port *ap, struct ata_taskfile *tf);
->tf_load(), to be initiated in hardware.
</para>
</sect2>
<sect2><title>Per-cmd ATAPI DMA capabilities filter</title>
<programlisting>
u8 (*check_status)(struct ata_port *ap);
void (*dev_select)(struct ata_port *ap, unsigned int device);
int (*check_atapi_dma) (struct ata_queued_cmd *qc);
</programlisting>
<para>
Reads the Status ATA shadow register from hardware. On some
hardware, this has the side effect of clearing the interrupt
condition.
Allow low-level driver to filter ATA PACKET commands, returning a status
indicating whether or not it is OK to use DMA for the supplied PACKET
command.
</para>
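<para>
A minimal filter might look like the sketch below (not from the original
document; the return convention is assumed to be zero for "DMA is OK"
and non-zero to force PIO):
</para>
<programlisting>
static int my_check_atapi_dma(struct ata_queued_cmd *qc)
{
	/* hypothetical policy: only allow DMA for plain reads/writes */
	u8 opcode = qc->scsicmd->cmnd[0];

	return (opcode == READ_10 || opcode == WRITE_10) ? 0 : 1;
}
</programlisting>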
</sect2>
<sect2><title>Read specific ATA shadow registers</title>
<programlisting>
u8 (*check_status)(struct ata_port *ap);
u8 (*check_altstatus)(struct ata_port *ap);
u8 (*check_err)(struct ata_port *ap);
</programlisting>
<para>
Reads the Status/AltStatus/Error ATA shadow register from
hardware. On some hardware, reading the Status register has
the side effect of clearing the interrupt condition.
</para>
</sect2>
<sect2><title>Select ATA device on bus</title>
<programlisting>
void (*dev_select)(struct ata_port *ap, unsigned int device);
</programlisting>
@ -147,9 +187,13 @@ void (*dev_select)(struct ata_port *ap, unsigned int device);
<para>
Issues the low-level hardware command(s) that causes one of N
hardware devices to be considered 'selected' (active and
available for use) on the ATA bus.
available for use) on the ATA bus. This generally has no
meaning on FIS-based devices.
</para>
</sect2>
<sect2><title>Reset ATA bus</title>
<programlisting>
void (*phy_reset) (struct ata_port *ap);
</programlisting>
@ -162,17 +206,31 @@ void (*phy_reset) (struct ata_port *ap);
functions ata_bus_reset() or sata_phy_reset() for this hook.
</para>
</sect2>
<sect2><title>Control PCI IDE BMDMA engine</title>
<programlisting>
void (*bmdma_setup) (struct ata_queued_cmd *qc);
void (*bmdma_start) (struct ata_queued_cmd *qc);
void (*bmdma_stop) (struct ata_port *ap);
u8 (*bmdma_status) (struct ata_port *ap);
</programlisting>
<para>
When setting up an IDE BMDMA transaction, these hooks arm
(->bmdma_setup) and fire (->bmdma_start) the hardware's DMA
engine.
When setting up an IDE BMDMA transaction, these hooks arm
(->bmdma_setup), fire (->bmdma_start), and halt (->bmdma_stop)
the hardware's DMA engine. ->bmdma_status is used to read the standard
PCI IDE DMA Status register.
</para>
<para>
These hooks are typically either no-ops, or simply not implemented, in
FIS-based drivers.
</para>
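<para>
For a conventional PCI IDE-style controller, these four hooks can
usually point straight at the generic helpers, as in this excerpt of a
hypothetical struct ata_port_operations initializer (a sketch, not from
the original document):
</para>
<programlisting>
.bmdma_setup	= ata_bmdma_setup,
.bmdma_start	= ata_bmdma_start,
.bmdma_stop	= ata_bmdma_stop,
.bmdma_status	= ata_bmdma_status,
</programlisting>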
</sect2>
<sect2><title>High-level taskfile hooks</title>
<programlisting>
void (*qc_prep) (struct ata_queued_cmd *qc);
int (*qc_issue) (struct ata_queued_cmd *qc);
@ -190,20 +248,26 @@ int (*qc_issue) (struct ata_queued_cmd *qc);
->qc_issue is used to make a command active, once the hardware
and S/G tables have been prepared. IDE BMDMA drivers use the
helper function ata_qc_issue_prot() for taskfile protocol-based
dispatch. More advanced drivers roll their own ->qc_issue
implementation, using this as the "issue new ATA command to
hardware" hook.
dispatch. More advanced drivers implement their own ->qc_issue.
</para>
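<para>
Again as a hypothetical excerpt (not from the original document), an IDE
BMDMA driver with no special S/G requirements would simply wire up the
generic helpers:
</para>
<programlisting>
.qc_prep	= ata_qc_prep,
.qc_issue	= ata_qc_issue_prot,
</programlisting>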
</sect2>
<sect2><title>Timeout (error) handling</title>
<programlisting>
void (*eng_timeout) (struct ata_port *ap);
</programlisting>
<para>
This is a high level error handling function, called from the
error handling thread, when a command times out.
This is a high level error handling function, called from the
error handling thread, when a command times out. Most newer
hardware will implement its own error handling code here. IDE BMDMA
drivers may use the helper function ata_eng_timeout().
</para>
</sect2>
<sect2><title>Hardware interrupt handling</title>
<programlisting>
irqreturn_t (*irq_handler)(int, void *, struct pt_regs *);
void (*irq_clear) (struct ata_port *);
@ -216,6 +280,9 @@ void (*irq_clear) (struct ata_port *);
is quiet.
</para>
</sect2>
<sect2><title>SATA phy read/write</title>
<programlisting>
u32 (*scr_read) (struct ata_port *ap, unsigned int sc_reg);
void (*scr_write) (struct ata_port *ap, unsigned int sc_reg,
@ -227,6 +294,9 @@ void (*scr_write) (struct ata_port *ap, unsigned int sc_reg,
if ->phy_reset hook called the sata_phy_reset() helper function.
</para>
</sect2>
<sect2><title>Init and shutdown</title>
<programlisting>
int (*port_start) (struct ata_port *ap);
void (*port_stop) (struct ata_port *ap);
@ -240,15 +310,17 @@ void (*host_stop) (struct ata_host_set *host_set);
tasks.
</para>
<para>
->host_stop() is called when the rmmod or hot unplug process
begins. The hook must stop all hardware interrupts, DMA
engines, etc.
</para>
<para>
->port_stop() is called before ->host_stop(). Its sole function
is to release DMA/memory resources, now that they are no longer
actively being used.
</para>
<para>
->host_stop() is called after all ->port_stop() calls
have completed. The hook must finalize hardware shutdown, release DMA
and other resources, etc.
</para>
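<para>
A minimal ->port_start() patterned after the generic helper might look
like the following sketch (not from the original document; field and
constant names assume the libata of this era):
</para>
<programlisting>
static int my_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host_set->dev;

	/* allocate the PRD (scatter/gather descriptor) table */
	ap->prd = dma_alloc_coherent(dev, ATA_PRD_TBL_SZ,
				     &ap->prd_dma, GFP_KERNEL);
	if (!ap->prd)
		return -ENOMEM;

	return 0;
}
</programlisting>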
</sect2>
</sect1>
</chapter>
@ -279,4 +351,24 @@ void (*host_stop) (struct ata_host_set *host_set);
!Idrivers/scsi/sata_sil.c
</chapter>
<chapter id="libataThanks">
<title>Thanks</title>
<para>
The bulk of the ATA knowledge comes thanks to long conversations with
Andre Hedrick (www.linux-ide.org), and long hours pondering the ATA
and SCSI specifications.
</para>
<para>
Thanks to Alan Cox for pointing out similarities
between SATA and SCSI, and in general for motivation to hack on
libata.
</para>
<para>
libata's device detection
method, ata_pio_devchk, and in general all the early probing was
based on extensive study of Hale Landis's probe/reset code in his
ATADRVR driver (www.ata-atapi.com).
</para>
</chapter>
</book>


@ -0,0 +1,128 @@
CPU frequency and voltage scaling statistics in the Linux(TM) kernel
L i n u x c p u f r e q - s t a t s d r i v e r
- information for users -
Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Contents
1. Introduction
2. Statistics Provided (with example)
3. Configuring cpufreq-stats
1. Introduction
cpufreq-stats is a driver that provides CPU frequency statistics for each CPU.
These statistics are provided in /sysfs as a set of read-only interfaces. This
interface (when configured) will appear in a separate directory under cpufreq
in /sysfs (<sysfs root>/devices/system/cpu/cpuX/cpufreq/stats/) for each CPU.
Various statistics will form read-only files under this directory.
This driver is designed to be independent of any particular cpufreq_driver
that may be running on your CPU. So, it will work with any cpufreq_driver.
2. Statistics Provided (with example)
cpufreq-stats provides the following statistics (explained in detail below).
- time_in_state
- total_trans
- trans_table
All the statistics are accumulated from the time the stats driver was
inserted to the time when a read of a particular statistic is done.
Obviously, the stats driver will not have any information about frequency
transitions that happened before it was inserted.
--------------------------------------------------------------------------------
<mysystem>:/sys/devices/system/cpu/cpu0/cpufreq/stats # ls -l
total 0
drwxr-xr-x 2 root root 0 May 14 16:06 .
drwxr-xr-x 3 root root 0 May 14 15:58 ..
-r--r--r-- 1 root root 4096 May 14 16:06 time_in_state
-r--r--r-- 1 root root 4096 May 14 16:06 total_trans
-r--r--r-- 1 root root 4096 May 14 16:06 trans_table
--------------------------------------------------------------------------------
- time_in_state
This gives the amount of time spent in each of the frequencies supported by
this CPU. The cat output will have one "<frequency> <time>" pair per line,
meaning this CPU spent <time> usertime units of time at <frequency>. The
output will have one line for each of the supported frequencies. The
usertime unit here is 10 ms (similar to other times exported in /proc).
--------------------------------------------------------------------------------
<mysystem>:/sys/devices/system/cpu/cpu0/cpufreq/stats # cat time_in_state
3600000 2089
3400000 136
3200000 34
3000000 67
2800000 172488
--------------------------------------------------------------------------------
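As an illustration (not part of the original document), a small user-space C
program can turn time_in_state into per-frequency percentages; the sysfs path
and the 10 ms unit are as described above:
--------------------------------------------------------------------------------
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/devices/system/cpu/cpu0/cpufreq/stats/time_in_state", "r");
	unsigned long freq[64], t[64], total = 0;
	int i, n = 0;

	if (!f)
		return 1;
	while (n < 64 && fscanf(f, "%lu %lu", &freq[n], &t[n]) == 2)
		total += t[n++];
	fclose(f);
	for (i = 0; i < n; i++)	/* each tick is 10 ms of residency */
		printf("%8lu kHz: %5.1f%%\n", freq[i],
		       total ? 100.0 * t[i] / total : 0.0);
	return 0;
}
--------------------------------------------------------------------------------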
- total_trans
This gives the total number of frequency transitions on this CPU. The cat
output will have a single count which is the total number of frequency
transitions.
--------------------------------------------------------------------------------
<mysystem>:/sys/devices/system/cpu/cpu0/cpufreq/stats # cat total_trans
20
--------------------------------------------------------------------------------
- trans_table
This gives fine-grained information about all CPU frequency transitions.
The cat output here is a two-dimensional matrix, where an entry <i,j>
(row i, column j) is the count of transitions from Freq_i to Freq_j.
Freq_i is in descending order with increasing rows and Freq_j is in
descending order with increasing columns. The output also contains the
actual frequency values for each row and column for better readability.
--------------------------------------------------------------------------------
<mysystem>:/sys/devices/system/cpu/cpu0/cpufreq/stats # cat trans_table
   From  :    To
         :   3600000   3400000   3200000   3000000   2800000
  3600000:         0         5         0         0         0
  3400000:         4         0         2         0         0
  3200000:         0         1         0         2         0
  3000000:         0         0         1         0         3
  2800000:         0         0         0         2         0
--------------------------------------------------------------------------------
3. Configuring cpufreq-stats
To configure cpufreq-stats in your kernel
Config Main Menu
	Power management options (ACPI, APM)  --->
		CPU Frequency scaling  --->
			[*] CPU Frequency scaling
			<*>   CPU frequency translation statistics
			[*]     CPU frequency translation statistics details
"CPU Frequency scaling" (CONFIG_CPU_FREQ) should be enabled to configure
cpufreq-stats.
"CPU frequency translation statistics" (CONFIG_CPU_FREQ_STAT) provides the
basic statistics which includes time_in_state and total_trans.
"CPU frequency translation statistics details" (CONFIG_CPU_FREQ_STAT_DETAILS)
provides fine grained cpufreq stats by trans_table. The reason for having a
seperate config option for trans_table is:
- trans_table goes against the traditional /sysfs rule of one value per
interface. It provides a whole bunch of value in a 2 dimensional matrix
form.
Once these two options are enabled and your CPU supports frequency scaling,
you will be able to see the CPU frequency statistics in /sysfs.


@ -239,6 +239,12 @@ L: linux-usb-devel@lists.sourceforge.net
W: http://www.linux-usb.org/SpeedTouch/
S: Maintained
ALI1563 I2C DRIVER
P: Rudolf Marek
M: r.marek@sh.cvut.cz
L: sensors@stimpy.netroedge.com
S: Maintained
ALPHA PORT
P: Richard Henderson
M: rth@twiddle.net
@ -1023,8 +1029,8 @@ W: http://www.ia64-linux.org/
S: Maintained
SN-IA64 (Itanium) SUB-PLATFORM
P: Jesse Barnes
M: jbarnes@sgi.com
P: Greg Edwards
M: edwardsg@sgi.com
L: linux-altix@sgi.com
L: linux-ia64@vger.kernel.org
W: http://www.sgi.com/altix


@ -1,7 +1,7 @@
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 12
EXTRAVERSION =-rc5
EXTRAVERSION =-rc6
NAME=Woozy Numbat
# *DOCUMENTATION*


@ -54,7 +54,7 @@ asmlinkage void ret_from_fork(void);
void default_idle(void)
{
while(1) {
if (need_resched()) {
if (!need_resched()) {
local_irq_enable();
__asm__("sleep");
local_irq_disable();


@ -23,7 +23,7 @@ config X86_ACPI_CPUFREQ
If in doubt, say N.
config ELAN_CPUFREQ
tristate "AMD Elan"
tristate "AMD Elan SC400 and SC410"
select CPU_FREQ_TABLE
depends on X86_ELAN
---help---
@ -38,6 +38,18 @@ config ELAN_CPUFREQ
If in doubt, say N.
config SC520_CPUFREQ
tristate "AMD Elan SC520"
select CPU_FREQ_TABLE
depends on X86_ELAN
---help---
This adds the CPUFreq driver for AMD Elan SC520 processor.
For details, take a look at <file:Documentation/cpu-freq/>.
If in doubt, say N.
config X86_POWERNOW_K6
tristate "AMD Mobile K6-2/K6-3 PowerNow!"
select CPU_FREQ_TABLE


@ -3,6 +3,7 @@ obj-$(CONFIG_X86_POWERNOW_K7) += powernow-k7.o
obj-$(CONFIG_X86_POWERNOW_K8) += powernow-k8.o
obj-$(CONFIG_X86_LONGHAUL) += longhaul.o
obj-$(CONFIG_ELAN_CPUFREQ) += elanfreq.o
obj-$(CONFIG_SC520_CPUFREQ) += sc520_freq.o
obj-$(CONFIG_X86_LONGRUN) += longrun.o
obj-$(CONFIG_X86_GX_SUSPMOD) += gx-suspmod.o
obj-$(CONFIG_X86_SPEEDSTEP_ICH) += speedstep-ich.o


@ -29,6 +29,7 @@
#include <linux/cpufreq.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <asm/msr.h>
#include <asm/timex.h>
@ -119,7 +120,13 @@ static int longhaul_get_cpu_mult(void)
static void do_powersaver(union msr_longhaul *longhaul,
unsigned int clock_ratio_index)
{
struct pci_dev *dev;
unsigned long flags;
unsigned int tmp_mask;
int version;
int i;
u16 pci_cmd;
u16 cmd_state[64];
switch (cpu_model) {
case CPU_EZRA_T:
@ -137,17 +144,58 @@ static void do_powersaver(union msr_longhaul *longhaul,
longhaul->bits.SoftBusRatio4 = (clock_ratio_index & 0x10) >> 4;
longhaul->bits.EnableSoftBusRatio = 1;
longhaul->bits.RevisionKey = 0;
local_irq_disable();
wrmsrl(MSR_VIA_LONGHAUL, longhaul->val);
preempt_disable();
local_irq_save(flags);
/*
* get current pci bus master state for all devices
* and clear bus master bit
*/
dev = NULL;
i = 0;
do {
dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
if (dev != NULL) {
pci_read_config_word(dev, PCI_COMMAND, &pci_cmd);
cmd_state[i++] = pci_cmd;
pci_cmd &= ~PCI_COMMAND_MASTER;
pci_write_config_word(dev, PCI_COMMAND, pci_cmd);
}
} while (dev != NULL);
tmp_mask=inb(0x21); /* works on C3. save mask. */
outb(0xFE,0x21); /* TMR0 only */
outb(0xFF,0x80); /* delay */
local_irq_enable();
__hlt();
wrmsrl(MSR_VIA_LONGHAUL, longhaul->val);
__hlt();
local_irq_disable();
outb(tmp_mask,0x21); /* restore mask */
/* restore pci bus master state for all devices */
dev = NULL;
i = 0;
do {
dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
if (dev != NULL) {
pci_cmd = cmd_state[i++];
pci_write_config_word(dev, PCI_COMMAND, pci_cmd); /* PCI_COMMAND is 16 bits */
}
} while (dev != NULL);
local_irq_restore(flags);
preempt_enable();
/* disable bus ratio bit */
rdmsrl(MSR_VIA_LONGHAUL, longhaul->val);
longhaul->bits.EnableSoftBusRatio = 0;
longhaul->bits.RevisionKey = version;
local_irq_disable();
wrmsrl(MSR_VIA_LONGHAUL, longhaul->val);
local_irq_enable();
}
/**
@ -578,7 +626,7 @@ static int __init longhaul_cpu_init(struct cpufreq_policy *policy)
longhaul_setup_voltagescaling();
policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
policy->cpuinfo.transition_latency = 200000; /* nsec */
policy->cur = calc_speed(longhaul_get_cpu_mult());
ret = cpufreq_frequency_table_cpuinfo(policy, longhaul_table);


@ -23,6 +23,7 @@
#include <linux/dmi.h>
#include <asm/msr.h>
#include <asm/timer.h>
#include <asm/timex.h>
#include <asm/io.h>
#include <asm/system.h>
@ -586,13 +587,17 @@ static int __init powernow_cpu_init (struct cpufreq_policy *policy)
rdmsrl (MSR_K7_FID_VID_STATUS, fidvidstatus.val);
/* A K7 with powernow technology is set to max frequency by BIOS */
fsb = (10 * cpu_khz) / fid_codes[fidvidstatus.bits.MFID];
/* recalibrate cpu_khz */
result = recalibrate_cpu_khz();
if (result)
return result;
fsb = (10 * cpu_khz) / fid_codes[fidvidstatus.bits.CFID];
if (!fsb) {
printk(KERN_WARNING PFX "can not determine bus frequency\n");
return -EINVAL;
}
dprintk("FSB: %3d.%03d MHz\n", fsb/1000, fsb%1000);
dprintk("FSB: %3dMHz\n", fsb/1000);
if (dmi_check_system(powernow_dmi_table) || acpi_force) {
printk (KERN_INFO PFX "PSB/PST known to be broken. Trying ACPI instead\n");


@ -4,7 +4,7 @@
* GNU general public license version 2. See "COPYING" or
* http://www.gnu.org/licenses/gpl.html
*
* Support : paul.devriendt@amd.com
* Support : mark.langsdorf@amd.com
*
* Based on the powernow-k7.c module written by Dave Jones.
* (C) 2003 Dave Jones <davej@codemonkey.org.uk> on behalf of SuSE Labs
@ -15,12 +15,13 @@
*
* Valuable input gratefully received from Dave Jones, Pavel Machek,
* Dominik Brodowski, and others.
* Originally developed by Paul Devriendt.
* Processor information obtained from Chapter 9 (Power and Thermal Management)
* of the "BIOS and Kernel Developer's Guide for the AMD Athlon 64 and AMD
* Opteron Processors" available for download from www.amd.com
*
* Tables for specific CPUs can be inferred from
* http://www.amd.com/us-en/assets/content_type/white_papers_and_tech_docs/30430.pdf
* http://www.amd.com/us-en/assets/content_type/white_papers_and_tech_docs/30430.pdf
*/
#include <linux/kernel.h>
@ -30,6 +31,7 @@
#include <linux/cpufreq.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/cpumask.h>
#include <asm/msr.h>
#include <asm/io.h>
@ -42,7 +44,7 @@
#define PFX "powernow-k8: "
#define BFX PFX "BIOS error: "
#define VERSION "version 1.00.09e"
#define VERSION "version 1.40.2"
#include "powernow-k8.h"
/* serialize freq changes */
@ -50,6 +52,10 @@ static DECLARE_MUTEX(fidvid_sem);
static struct powernow_k8_data *powernow_data[NR_CPUS];
#ifndef CONFIG_SMP
static cpumask_t cpu_core_map[1];
#endif
/* Return a frequency in MHz, given an input fid */
static u32 find_freq_from_fid(u32 fid)
{
@ -274,11 +280,18 @@ static int core_voltage_pre_transition(struct powernow_k8_data *data, u32 reqvid
{
u32 rvosteps = data->rvo;
u32 savefid = data->currfid;
u32 maxvid, lo;
dprintk("ph1 (cpu%d): start, currfid 0x%x, currvid 0x%x, reqvid 0x%x, rvo 0x%x\n",
smp_processor_id(),
data->currfid, data->currvid, reqvid, data->rvo);
rdmsr(MSR_FIDVID_STATUS, lo, maxvid);
maxvid = 0x1f & (maxvid >> 16);
dprintk("ph1 maxvid=0x%x\n", maxvid);
if (reqvid < maxvid) /* lower numbers are higher voltages */
reqvid = maxvid;
while (data->currvid > reqvid) {
dprintk("ph1: curr 0x%x, req vid 0x%x\n",
data->currvid, reqvid);
@ -286,8 +299,8 @@ static int core_voltage_pre_transition(struct powernow_k8_data *data, u32 reqvid
return 1;
}
while ((rvosteps > 0) && ((data->rvo + data->currvid) > reqvid)) {
if (data->currvid == 0) {
while ((rvosteps > 0) && ((data->rvo + data->currvid) > reqvid)) {
if (data->currvid == maxvid) {
rvosteps = 0;
} else {
dprintk("ph1: changing vid for rvo, req 0x%x\n",
@ -671,7 +684,7 @@ static int find_psb_table(struct powernow_k8_data *data)
* BIOS and Kernel Developer's Guide, which is available on
* www.amd.com
*/
printk(KERN_ERR PFX "BIOS error - no PSB\n");
printk(KERN_INFO PFX "BIOS error - no PSB or ACPI _PSS objects\n");
return -ENODEV;
}
@ -695,7 +708,7 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
struct cpufreq_frequency_table *powernow_table;
if (acpi_processor_register_performance(&data->acpi_data, data->cpu)) {
dprintk("register performance failed\n");
dprintk("register performance failed: bad ACPI data\n");
return -EIO;
}
@ -746,22 +759,23 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
continue;
}
if (fid < HI_FID_TABLE_BOTTOM) {
if (cntlofreq) {
/* if both entries are the same, ignore this
* one...
*/
if ((powernow_table[i].frequency != powernow_table[cntlofreq].frequency) ||
(powernow_table[i].index != powernow_table[cntlofreq].index)) {
printk(KERN_ERR PFX "Too many lo freq table entries\n");
goto err_out_mem;
}
dprintk("double low frequency table entry, ignoring it.\n");
powernow_table[i].frequency = CPUFREQ_ENTRY_INVALID;
continue;
} else
cntlofreq = i;
/* verify only 1 entry from the lo frequency table */
if (fid < HI_FID_TABLE_BOTTOM) {
if (cntlofreq) {
/* if both entries are the same, ignore this
* one...
*/
if ((powernow_table[i].frequency != powernow_table[cntlofreq].frequency) ||
(powernow_table[i].index != powernow_table[cntlofreq].index)) {
printk(KERN_ERR PFX "Too many lo freq table entries\n");
goto err_out_mem;
}
dprintk("double low frequency table entry, ignoring it.\n");
powernow_table[i].frequency = CPUFREQ_ENTRY_INVALID;
continue;
} else
cntlofreq = i;
}
if (powernow_table[i].frequency != (data->acpi_data.states[i].core_frequency * 1000)) {
@ -816,7 +830,7 @@ static int transition_frequency(struct powernow_k8_data *data, unsigned int inde
{
u32 fid;
u32 vid;
int res;
int res, i;
struct cpufreq_freqs freqs;
dprintk("cpu %d transition to index %u\n", smp_processor_id(), index);
@ -841,7 +855,8 @@ static int transition_frequency(struct powernow_k8_data *data, unsigned int inde
}
if ((fid < HI_FID_TABLE_BOTTOM) && (data->currfid < HI_FID_TABLE_BOTTOM)) {
printk("ignoring illegal change in lo freq table-%x to 0x%x\n",
printk(KERN_ERR PFX
"ignoring illegal change in lo freq table-%x to 0x%x\n",
data->currfid, fid);
return 1;
}
@ -850,18 +865,20 @@ static int transition_frequency(struct powernow_k8_data *data, unsigned int inde
smp_processor_id(), fid, vid);
freqs.cpu = data->cpu;
freqs.old = find_khz_freq_from_fid(data->currfid);
freqs.new = find_khz_freq_from_fid(fid);
cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
for_each_cpu_mask(i, cpu_core_map[data->cpu]) {
freqs.cpu = i;
cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
}
down(&fidvid_sem);
res = transition_fid_vid(data, fid, vid);
up(&fidvid_sem);
freqs.new = find_khz_freq_from_fid(data->currfid);
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
for_each_cpu_mask(i, cpu_core_map[data->cpu]) {
freqs.cpu = i;
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
}
return res;
}
@ -874,6 +891,7 @@ static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsi
u32 checkvid = data->currvid;
unsigned int newstate;
int ret = -EIO;
int i;
/* only run on specific CPU from here on */
oldmask = current->cpus_allowed;
@ -902,22 +920,41 @@ static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsi
data->currfid, data->currvid);
if ((checkvid != data->currvid) || (checkfid != data->currfid)) {
printk(KERN_ERR PFX
"error - out of sync, fid 0x%x 0x%x, vid 0x%x 0x%x\n",
checkfid, data->currfid, checkvid, data->currvid);
printk(KERN_INFO PFX
"error - out of sync, fix 0x%x 0x%x, vid 0x%x 0x%x\n",
checkfid, data->currfid, checkvid, data->currvid);
}
if (cpufreq_frequency_table_target(pol, data->powernow_table, targfreq, relation, &newstate))
goto err_out;
down(&fidvid_sem);
for_each_cpu_mask(i, cpu_core_map[pol->cpu]) {
/* make sure the sibling is initialized */
if (!powernow_data[i]) {
ret = 0;
up(&fidvid_sem);
goto err_out;
}
}
powernow_k8_acpi_pst_values(data, newstate);
if (transition_frequency(data, newstate)) {
printk(KERN_ERR PFX "transition frequency failed\n");
ret = 1;
up(&fidvid_sem);
goto err_out;
}
/* Update all the fid/vids of our siblings */
for_each_cpu_mask(i, cpu_core_map[pol->cpu]) {
powernow_data[i]->currvid = data->currvid;
powernow_data[i]->currfid = data->currfid;
}
up(&fidvid_sem);
pol->cur = find_khz_freq_from_fid(data->currfid);
ret = 0;
@ -962,7 +999,7 @@ static int __init powernowk8_cpu_init(struct cpufreq_policy *pol)
*/
if ((num_online_cpus() != 1) || (num_possible_cpus() != 1)) {
printk(KERN_INFO PFX "MP systems not supported by PSB BIOS structure\n");
printk(KERN_ERR PFX "MP systems not supported by PSB BIOS structure\n");
kfree(data);
return -ENODEV;
}
@ -1003,6 +1040,7 @@ static int __init powernowk8_cpu_init(struct cpufreq_policy *pol)
schedule();
pol->governor = CPUFREQ_DEFAULT_GOVERNOR;
pol->cpus = cpu_core_map[pol->cpu];
/* Take a crude guess here.
* That guess was in microseconds, so multiply with 1000 */
@ -1069,7 +1107,7 @@ static unsigned int powernowk8_get (unsigned int cpu)
return 0;
}
preempt_disable();
if (query_current_values_with_pending_wait(data))
goto out;
@ -1127,9 +1165,10 @@ static void __exit powernowk8_exit(void)
cpufreq_unregister_driver(&cpufreq_amd64_driver);
}
MODULE_AUTHOR("Paul Devriendt <paul.devriendt@amd.com>");
MODULE_AUTHOR("Paul Devriendt <paul.devriendt@amd.com> and Mark Langsdorf <mark.langsdorf@amd.com.");
MODULE_DESCRIPTION("AMD Athlon 64 and Opteron processor frequency driver.");
MODULE_LICENSE("GPL");
late_initcall(powernowk8_init);
module_exit(powernowk8_exit);


@ -174,3 +174,18 @@ static int core_voltage_post_transition(struct powernow_k8_data *data, u32 reqvi
static int core_frequency_transition(struct powernow_k8_data *data, u32 reqfid);
static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index);
#ifndef for_each_cpu_mask
#define for_each_cpu_mask(i,mask) for (i=0;i<1;i++)
#endif
#ifdef CONFIG_SMP
static inline void define_siblings(int cpu, cpumask_t cpu_sharedcore_mask[])
{
}
#else
static inline void define_siblings(int cpu, cpumask_t cpu_sharedcore_mask[])
{
cpu_set(0, cpu_sharedcore_mask[0]);
}
#endif


@ -0,0 +1,186 @@
/*
* sc520_freq.c: cpufreq driver for the AMD Elan sc520
*
* Copyright (C) 2005 Sean Young <sean@mess.org>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Based on elanfreq.c
*
* 2005-03-30: - initial revision
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/cpufreq.h>
#include <asm/msr.h>
#include <asm/timex.h>
#include <asm/io.h>
#define MMCR_BASE 0xfffef000 /* The default base address */
#define OFFS_CPUCTL 0x2 /* CPU Control Register */
static __u8 __iomem *cpuctl;
#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "sc520_freq", msg)
static struct cpufreq_frequency_table sc520_freq_table[] = {
{0x01, 100000},
{0x02, 133000},
{0, CPUFREQ_TABLE_END},
};
static unsigned int sc520_freq_get_cpu_frequency(unsigned int cpu)
{
u8 clockspeed_reg = *cpuctl;
switch (clockspeed_reg & 0x03) {
default:
printk(KERN_ERR "sc520_freq: error: cpuctl register has unexpected value %02x\n", clockspeed_reg);
case 0x01:
return 100000;
case 0x02:
return 133000;
}
}
static void sc520_freq_set_cpu_state (unsigned int state)
{
struct cpufreq_freqs freqs;
u8 clockspeed_reg;
freqs.old = sc520_freq_get_cpu_frequency(0);
freqs.new = sc520_freq_table[state].frequency;
freqs.cpu = 0; /* AMD Elan is UP */
cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
dprintk("attempting to set frequency to %i kHz\n",
sc520_freq_table[state].frequency);
local_irq_disable();
clockspeed_reg = *cpuctl & ~0x03;
*cpuctl = clockspeed_reg | sc520_freq_table[state].index;
local_irq_enable();
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
}
static int sc520_freq_verify (struct cpufreq_policy *policy)
{
return cpufreq_frequency_table_verify(policy, &sc520_freq_table[0]);
}
static int sc520_freq_target (struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation)
{
unsigned int newstate = 0;
if (cpufreq_frequency_table_target(policy, sc520_freq_table, target_freq, relation, &newstate))
return -EINVAL;
sc520_freq_set_cpu_state(newstate);
return 0;
}
/*
* Module init and exit code
*/
static int sc520_freq_cpu_init(struct cpufreq_policy *policy)
{
struct cpuinfo_x86 *c = cpu_data;
int result;
/* capability check */
if (c->x86_vendor != X86_VENDOR_AMD ||
c->x86 != 4 || c->x86_model != 9)
return -ENODEV;
/* cpuinfo and default policy values */
policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
policy->cpuinfo.transition_latency = 1000000; /* 1ms */
policy->cur = sc520_freq_get_cpu_frequency(0);
result = cpufreq_frequency_table_cpuinfo(policy, sc520_freq_table);
if (result)
return (result);
cpufreq_frequency_table_get_attr(sc520_freq_table, policy->cpu);
return 0;
}
static int sc520_freq_cpu_exit(struct cpufreq_policy *policy)
{
cpufreq_frequency_table_put_attr(policy->cpu);
return 0;
}
static struct freq_attr* sc520_freq_attr[] = {
&cpufreq_freq_attr_scaling_available_freqs,
NULL,
};
static struct cpufreq_driver sc520_freq_driver = {
.get = sc520_freq_get_cpu_frequency,
.verify = sc520_freq_verify,
.target = sc520_freq_target,
.init = sc520_freq_cpu_init,
.exit = sc520_freq_cpu_exit,
.name = "sc520_freq",
.owner = THIS_MODULE,
.attr = sc520_freq_attr,
};
static int __init sc520_freq_init(void)
{
struct cpuinfo_x86 *c = cpu_data;
/* Test if we have the right hardware */
if(c->x86_vendor != X86_VENDOR_AMD ||
c->x86 != 4 || c->x86_model != 9) {
dprintk("no Elan SC520 processor found!\n");
return -ENODEV;
}
cpuctl = ioremap((unsigned long)(MMCR_BASE + OFFS_CPUCTL), 1);
if(!cpuctl) {
printk(KERN_ERR "sc520_freq: error: failed to remap memory\n");
return -ENOMEM;
}
return cpufreq_register_driver(&sc520_freq_driver);
}
static void __exit sc520_freq_exit(void)
{
cpufreq_unregister_driver(&sc520_freq_driver);
iounmap(cpuctl);
}
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Sean Young <sean@mess.org>");
MODULE_DESCRIPTION("cpufreq driver for AMD's Elan sc520 CPU");
module_init(sc520_freq_init);
module_exit(sc520_freq_exit);


@ -54,6 +54,8 @@ enum {
CPU_DOTHAN_A1,
CPU_DOTHAN_A2,
CPU_DOTHAN_B0,
CPU_MP4HT_D0,
CPU_MP4HT_E0,
};
static const struct cpu_id cpu_ids[] = {
@ -61,6 +63,8 @@ static const struct cpu_id cpu_ids[] = {
[CPU_DOTHAN_A1] = { 6, 13, 1 },
[CPU_DOTHAN_A2] = { 6, 13, 2 },
[CPU_DOTHAN_B0] = { 6, 13, 6 },
[CPU_MP4HT_D0] = {15, 3, 4 },
[CPU_MP4HT_E0] = {15, 4, 1 },
};
#define N_IDS (sizeof(cpu_ids)/sizeof(cpu_ids[0]))
@ -226,6 +230,8 @@ static struct cpu_model models[] =
{ &cpu_ids[CPU_DOTHAN_A1], NULL, 0, NULL },
{ &cpu_ids[CPU_DOTHAN_A2], NULL, 0, NULL },
{ &cpu_ids[CPU_DOTHAN_B0], NULL, 0, NULL },
{ &cpu_ids[CPU_MP4HT_D0], NULL, 0, NULL },
{ &cpu_ids[CPU_MP4HT_E0], NULL, 0, NULL },
{ NULL, }
};


@ -336,7 +336,7 @@ unsigned int speedstep_get_freqs(unsigned int processor,
if (!prev_speed)
return -EIO;
dprintk("previous seped is %u\n", prev_speed);
dprintk("previous speed is %u\n", prev_speed);
local_irq_save(flags);
@ -348,7 +348,7 @@ unsigned int speedstep_get_freqs(unsigned int processor,
goto out;
}
dprintk("low seped is %u\n", *low_speed);
dprintk("low speed is %u\n", *low_speed);
/* switch to high state */
set_state(SPEEDSTEP_HIGH);
@ -358,7 +358,7 @@ unsigned int speedstep_get_freqs(unsigned int processor,
goto out;
}
dprintk("high seped is %u\n", *high_speed);
dprintk("high speed is %u\n", *high_speed);
if (*low_speed == *high_speed) {
ret = -ENODEV;


@ -357,6 +357,9 @@ static int __init speedstep_init(void)
case SPEEDSTEP_PROCESSOR_PIII_C:
case SPEEDSTEP_PROCESSOR_PIII_C_EARLY:
break;
case SPEEDSTEP_PROCESSOR_P4M:
printk(KERN_INFO "speedstep-smi: you're trying to use this cpufreq driver on a Pentium 4-based CPU. Most likely it will not work.\n");
break;
default:
speedstep_processor = 0;
}


@ -1502,11 +1502,13 @@ void __init setup_arch(char **cmdline_p)
if (efi_enabled)
efi_map_memmap();
#ifdef CONFIG_ACPI_BOOT
/*
* Parse the ACPI tables for possible boot-time SMP configuration.
*/
acpi_boot_table_init();
acpi_boot_init();
#endif
#ifdef CONFIG_X86_LOCAL_APIC
if (smp_found_config)


@ -6,6 +6,7 @@
#include <linux/timex.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <asm/io.h>
#include <asm/timer.h>
@ -24,7 +25,7 @@
#define CALIBRATE_TIME (5 * 1000020/HZ)
unsigned long __init calibrate_tsc(void)
unsigned long calibrate_tsc(void)
{
mach_prepare_counter();
@ -139,7 +140,7 @@ bad_calibration:
#endif
/* calculate cpu_khz */
void __init init_cpu_khz(void)
void init_cpu_khz(void)
{
if (cpu_has_tsc) {
unsigned long tsc_quotient = calibrate_tsc();
@ -158,3 +159,4 @@ void __init init_cpu_khz(void)
}
}
}


@ -320,6 +320,26 @@ core_initcall(cpufreq_tsc);
static inline void cpufreq_delayed_get(void) { return; }
#endif
int recalibrate_cpu_khz(void)
{
#ifndef CONFIG_SMP
unsigned long cpu_khz_old = cpu_khz;
if (cpu_has_tsc) {
init_cpu_khz();
cpu_data[0].loops_per_jiffy =
cpufreq_scale(cpu_data[0].loops_per_jiffy,
cpu_khz_old,
cpu_khz);
return 0;
} else
return -ENODEV;
#else
return -ENODEV;
#endif
}
EXPORT_SYMBOL(recalibrate_cpu_khz);
static void mark_offset_tsc(void)
{
unsigned long lost,delay;


@ -2427,7 +2427,7 @@ sys32_epoll_wait(int epfd, struct epoll_event32 __user * events, int maxevents,
{
struct epoll_event *events64 = NULL;
mm_segment_t old_fs = get_fs();
int error, numevents, size;
int numevents, size;
int evt_idx;
int do_free_pages = 0;


@ -1182,7 +1182,7 @@ ENTRY(notify_resume_user)
;;
(pNonSys) mov out2=0 // out2==0 => not a syscall
.fframe 16
.spillpsp ar.unat, 16 // (note that offset is relative to psp+0x10!)
.spillsp ar.unat, 16
st8 [sp]=r9,-16 // allocate space for ar.unat and save it
st8 [out1]=loc1,-8 // save ar.pfs, out1=&sigscratch
.body
@ -1208,7 +1208,7 @@ GLOBAL_ENTRY(sys_rt_sigsuspend)
adds out2=8,sp // out2=&sigscratch->ar_pfs
;;
.fframe 16
.spillpsp ar.unat, 16 // (note that offset is relative to psp+0x10!)
.spillsp ar.unat, 16
st8 [sp]=r9,-16 // allocate space for ar.unat and save it
st8 [out2]=loc1,-8 // save ar.pfs, out2=&sigscratch
.body


@ -1103,8 +1103,6 @@ ia64_mca_cpe_int_caller(int cpe_irq, void *arg, struct pt_regs *ptregs)
return IRQ_HANDLED;
}
#endif /* CONFIG_ACPI */
/*
* ia64_mca_cpe_poll
*
@ -1122,6 +1120,8 @@ ia64_mca_cpe_poll (unsigned long dummy)
platform_send_ipi(first_cpu(cpu_online_map), IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0);
}
#endif /* CONFIG_ACPI */
/*
* C portion of the OS INIT handler
*
@ -1390,8 +1390,7 @@ ia64_mca_init(void)
register_percpu_irq(IA64_MCA_WAKEUP_VECTOR, &mca_wkup_irqaction);
#ifdef CONFIG_ACPI
/* Setup the CPEI/P vector and handler */
cpe_vector = acpi_request_vector(ACPI_INTERRUPT_CPEI);
/* Setup the CPEI/P handler */
register_percpu_irq(IA64_CPEP_VECTOR, &mca_cpep_irqaction);
#endif
@ -1436,6 +1435,7 @@ ia64_mca_late_init(void)
#ifdef CONFIG_ACPI
/* Setup the CPEI/P vector and handler */
cpe_vector = acpi_request_vector(ACPI_INTERRUPT_CPEI);
init_timer(&cpe_poll_timer);
cpe_poll_timer.function = ia64_mca_cpe_poll;


@ -41,7 +41,7 @@
(pKStk) addl r3=THIS_CPU(ia64_mca_data),r3;; \
(pKStk) ld8 r3 = [r3];; \
(pKStk) addl r3=IA64_MCA_CPU_INIT_STACK_OFFSET,r3;; \
(pKStk) addl sp=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r3; \
(pKStk) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r3; \
(pUStk) mov ar.rsc=0; /* set enforced lazy mode, pl 0, little-endian, loadrs=0 */ \
(pUStk) addl r22=IA64_RBS_OFFSET,r1; /* compute base of register backing store */ \
;; \
@ -50,7 +50,6 @@
(pUStk) mov r23=ar.bspstore; /* save ar.bspstore */ \
(pUStk) dep r22=-1,r22,61,3; /* compute kernel virtual addr of RBS */ \
;; \
(pKStk) addl r1=-IA64_PT_REGS_SIZE,r1; /* if in kernel mode, use sp (r12) */ \
(pUStk) mov ar.bspstore=r22; /* switch to kernel RBS */ \
;; \
(pUStk) mov r18=ar.bsp; \


@ -11,7 +11,7 @@
* Version Perfmon-2.x is a rewrite of perfmon-1.x
* by Stephane Eranian, Hewlett Packard Co.
*
* Copyright (C) 1999-2003, 2005 Hewlett Packard Co
* Copyright (C) 1999-2005 Hewlett Packard Co
* Stephane Eranian <eranian@hpl.hp.com>
* David Mosberger-Tang <davidm@hpl.hp.com>
*
@ -497,6 +497,9 @@ typedef struct {
static pfm_stats_t pfm_stats[NR_CPUS];
static pfm_session_t pfm_sessions; /* global sessions information */
static spinlock_t pfm_alt_install_check = SPIN_LOCK_UNLOCKED;
static pfm_intr_handler_desc_t *pfm_alt_intr_handler;
static struct proc_dir_entry *perfmon_dir;
static pfm_uuid_t pfm_null_uuid = {0,};
@ -606,6 +609,7 @@ DEFINE_PER_CPU(unsigned long, pfm_syst_info);
DEFINE_PER_CPU(struct task_struct *, pmu_owner);
DEFINE_PER_CPU(pfm_context_t *, pmu_ctx);
DEFINE_PER_CPU(unsigned long, pmu_activation_number);
EXPORT_PER_CPU_SYMBOL_GPL(pfm_syst_info);
/* forward declaration */
@ -1325,7 +1329,7 @@ pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)
error_conflict:
DPRINT(("system wide not possible, conflicting session [%d] on CPU%d\n",
pfm_sessions.pfs_sys_session[cpu]->pid,
smp_processor_id()));
cpu));
abort:
UNLOCK_PFS(flags);
@ -5555,26 +5559,32 @@ pfm_interrupt_handler(int irq, void *arg, struct pt_regs *regs)
int ret;
this_cpu = get_cpu();
min = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min;
max = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max;
if (likely(!pfm_alt_intr_handler)) {
min = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min;
max = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max;
start_cycles = ia64_get_itc();
start_cycles = ia64_get_itc();
ret = pfm_do_interrupt_handler(irq, arg, regs);
ret = pfm_do_interrupt_handler(irq, arg, regs);
total_cycles = ia64_get_itc();
total_cycles = ia64_get_itc();
/*
* don't measure spurious interrupts
*/
if (likely(ret == 0)) {
total_cycles -= start_cycles;
/*
* don't measure spurious interrupts
*/
if (likely(ret == 0)) {
total_cycles -= start_cycles;
if (total_cycles < min) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min = total_cycles;
if (total_cycles > max) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max = total_cycles;
if (total_cycles < min) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min = total_cycles;
if (total_cycles > max) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max = total_cycles;
pfm_stats[this_cpu].pfm_ovfl_intr_cycles += total_cycles;
pfm_stats[this_cpu].pfm_ovfl_intr_cycles += total_cycles;
}
}
else {
(*pfm_alt_intr_handler->handler)(irq, arg, regs);
}
put_cpu_no_resched();
return IRQ_HANDLED;
}
@ -6425,6 +6435,141 @@ static struct irqaction perfmon_irqaction = {
.name = "perfmon"
};
static void
pfm_alt_save_pmu_state(void *data)
{
struct pt_regs *regs;
regs = ia64_task_regs(current);
DPRINT(("called\n"));
/*
* should not be necessary but
* let's not take any risk
*/
pfm_clear_psr_up();
pfm_clear_psr_pp();
ia64_psr(regs)->pp = 0;
/*
* This call is required
* May cause a spurious interrupt on some processors
*/
pfm_freeze_pmu();
ia64_srlz_d();
}
void
pfm_alt_restore_pmu_state(void *data)
{
struct pt_regs *regs;
regs = ia64_task_regs(current);
DPRINT(("called\n"));
/*
* put PMU back in state expected
* by perfmon
*/
pfm_clear_psr_up();
pfm_clear_psr_pp();
ia64_psr(regs)->pp = 0;
/*
* perfmon runs with PMU unfrozen at all times
*/
pfm_unfreeze_pmu();
ia64_srlz_d();
}
int
pfm_install_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
{
int ret, i;
int reserve_cpu;
/* some sanity checks */
if (hdl == NULL || hdl->handler == NULL) return -EINVAL;
/* do the easy test first */
if (pfm_alt_intr_handler) return -EBUSY;
/* one at a time in the install or remove, just fail the others */
if (!spin_trylock(&pfm_alt_install_check)) {
return -EBUSY;
}
/* reserve our session */
for_each_online_cpu(reserve_cpu) {
ret = pfm_reserve_session(NULL, 1, reserve_cpu);
if (ret) goto cleanup_reserve;
}
/* save the current system wide pmu states */
ret = on_each_cpu(pfm_alt_save_pmu_state, NULL, 0, 1);
if (ret) {
DPRINT(("on_each_cpu() failed: %d\n", ret));
goto cleanup_reserve;
}
/* officially change to the alternate interrupt handler */
pfm_alt_intr_handler = hdl;
spin_unlock(&pfm_alt_install_check);
return 0;
cleanup_reserve:
for_each_online_cpu(i) {
/* don't unreserve more than we reserved */
if (i >= reserve_cpu) break;
pfm_unreserve_session(NULL, 1, i);
}
spin_unlock(&pfm_alt_install_check);
return ret;
}
EXPORT_SYMBOL_GPL(pfm_install_alt_pmu_interrupt);
int
pfm_remove_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
{
int i;
int ret;
if (hdl == NULL) return -EINVAL;
/* cannot remove someone else's handler! */
if (pfm_alt_intr_handler != hdl) return -EINVAL;
/* one at a time in the install or remove, just fail the others */
if (!spin_trylock(&pfm_alt_install_check)) {
return -EBUSY;
}
pfm_alt_intr_handler = NULL;
ret = on_each_cpu(pfm_alt_restore_pmu_state, NULL, 0, 1);
if (ret) {
DPRINT(("on_each_cpu() failed: %d\n", ret));
}
for_each_online_cpu(i) {
pfm_unreserve_session(NULL, 1, i);
}
spin_unlock(&pfm_alt_install_check);
return 0;
}
EXPORT_SYMBOL_GPL(pfm_remove_alt_pmu_interrupt);
/*
* perfmon initialization routine, called from the initcall() table
*/


@ -692,16 +692,30 @@ convert_to_non_syscall (struct task_struct *child, struct pt_regs *pt,
unsigned long cfm)
{
struct unw_frame_info info, prev_info;
unsigned long ip, pr;
unsigned long ip, sp, pr;
unw_init_from_blocked_task(&info, child);
while (1) {
prev_info = info;
if (unw_unwind(&info) < 0)
return;
if (unw_get_rp(&info, &ip) < 0)
unw_get_sp(&info, &sp);
if ((long)((unsigned long)child + IA64_STK_OFFSET - sp)
< IA64_PT_REGS_SIZE) {
dprintk("ptrace.%s: ran off the top of the kernel "
"stack\n", __FUNCTION__);
return;
if (ip < FIXADDR_USER_END)
}
if (unw_get_pr (&prev_info, &pr) < 0) {
unw_get_rp(&prev_info, &ip);
dprintk("ptrace.%s: failed to read "
"predicate register (ip=0x%lx)\n",
__FUNCTION__, ip);
return;
}
if (unw_is_intr_frame(&info)
&& (pr & (1UL << PRED_USER_STACK)))
break;
}


@ -624,7 +624,7 @@ static struct {
__u16 thread_id;
__u16 proc_fixed_addr;
__u8 valid;
}mt_info[NR_CPUS] __devinit;
} mt_info[NR_CPUS] __devinitdata;
#ifdef CONFIG_HOTPLUG_CPU
static inline void


@ -182,13 +182,6 @@ do_mmap2 (unsigned long addr, unsigned long len, int prot, int flags, int fd, un
}
}
/*
* A zero mmap always succeeds in Linux, independent of whether or not the
* remaining arguments are valid.
*/
if (len == 0)
goto out;
/* Careful about overflows.. */
len = PAGE_ALIGN(len);
if (!len || len > TASK_SIZE) {


@ -271,6 +271,8 @@ void __init sn_setup(char **cmdline_p)
int major = sn_sal_rev_major(), minor = sn_sal_rev_minor();
extern void sn_cpu_init(void);
ia64_sn_plat_set_error_handling_features();
/*
* If the generic code has enabled vga console support - lets
* get rid of it again. This is a kludge for the fact that ACPI


@ -45,11 +45,13 @@ asmlinkage void ret_from_fork(void);
*/
void default_idle(void)
{
while(1) {
if (need_resched())
__asm__("stop #0x2000" : : : "cc");
schedule();
local_irq_disable();
while (!need_resched()) {
/* This stop will re-enable interrupts */
__asm__("stop #0x2000" : : : "cc");
local_irq_disable();
}
local_irq_enable();
}
void (*idle)(void) = default_idle;
@ -63,7 +65,12 @@ void (*idle)(void) = default_idle;
void cpu_idle(void)
{
/* endless idle loop with no priority at all */
idle();
while (1) {
idle();
preempt_enable_no_resched();
schedule();
preempt_disable();
}
}
void machine_restart(char * __unused)


@ -838,6 +838,17 @@ struct cpu_spec cpu_specs[] = {
.icache_bsize = 32,
.dcache_bsize = 32,
},
{ /* 405EP */
.pvr_mask = 0xffff0000,
.pvr_value = 0x51210000,
.cpu_name = "405EP",
.cpu_features = CPU_FTR_SPLIT_ID_CACHE |
CPU_FTR_USE_TB,
.cpu_user_features = PPC_FEATURE_32 |
PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
.icache_bsize = 32,
.dcache_bsize = 32,
},
#endif /* CONFIG_40x */
#ifdef CONFIG_44x


@ -619,7 +619,7 @@ _GLOBAL(flush_instruction_cache)
_GLOBAL(flush_icache_range)
BEGIN_FTR_SECTION
blr /* for 601, do nothing */
END_FTR_SECTION_IFSET(PPC_FEATURE_UNIFIED_CACHE)
END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
li r5,L1_CACHE_LINE_SIZE-1
andc r3,r3,r5
subf r4,r3,r4
@ -736,7 +736,7 @@ _GLOBAL(flush_dcache_all)
_GLOBAL(__flush_dcache_icache)
BEGIN_FTR_SECTION
blr /* for 601, do nothing */
END_FTR_SECTION_IFSET(PPC_FEATURE_UNIFIED_CACHE)
END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
rlwinm r3,r3,0,0,19 /* Get page base address */
li r4,4096/L1_CACHE_LINE_SIZE /* Number of lines in a page */
mtctr r4
@ -764,7 +764,7 @@ END_FTR_SECTION_IFSET(PPC_FEATURE_UNIFIED_CACHE)
_GLOBAL(__flush_dcache_icache_phys)
BEGIN_FTR_SECTION
blr /* for 601, do nothing */
END_FTR_SECTION_IFSET(PPC_FEATURE_UNIFIED_CACHE)
END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
mfmsr r10
rlwinm r0,r10,0,28,26 /* clear DR */
mtmsr r0


@ -626,8 +626,18 @@ inspect_node(phandle node, struct device_node *dad,
l = call_prom("package-to-path", 3, 1, node,
mem_start, mem_end - mem_start);
if (l >= 0) {
char *p, *ep;
np->full_name = PTRUNRELOC((char *) mem_start);
*(char *)(mem_start + l) = 0;
/* Fixup an Apple bug where they have bogus \0 chars in the
* middle of the path in some properties
*/
for (p = (char *)mem_start, ep = p + l; p < ep; p++)
if ((*p) == '\0') {
memmove(p, p+1, ep - p);
ep--;
}
mem_start = ALIGNUL(mem_start + l + 1);
}


@ -436,15 +436,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
REST_8GPRS(14, r1)
REST_10GPRS(22, r1)
#ifdef CONFIG_PPC_ISERIES
clrrdi r7,r1,THREAD_SHIFT /* get current_thread_info() */
ld r7,TI_FLAGS(r7) /* Get run light flag */
mfspr r9,CTRLF
srdi r7,r7,TIF_RUN_LIGHT
insrdi r9,r7,1,63 /* Insert run light into CTRL */
mtspr CTRLT,r9
#endif
/* convert old thread to its task_struct for return value */
addi r3,r3,-THREAD
ld r7,_NIP(r1) /* Return to _switch caller in new task */


@ -626,10 +626,10 @@ system_reset_iSeries:
lhz r24,PACAPACAINDEX(r13) /* Get processor # */
cmpwi 0,r24,0 /* Are we processor 0? */
beq .__start_initialization_iSeries /* Start up the first processor */
mfspr r4,CTRLF
li r5,RUNLATCH /* Turn off the run light */
mfspr r4,SPRN_CTRLF
li r5,CTRL_RUNLATCH /* Turn off the run light */
andc r4,r4,r5
mtspr CTRLT,r4
mtspr SPRN_CTRLT,r4
1:
HMT_LOW
@ -2082,9 +2082,9 @@ _GLOBAL(hmt_start_secondary)
mfspr r4, HID0
ori r4, r4, 0x1
mtspr HID0, r4
mfspr r4, CTRLF
mfspr r4, SPRN_CTRLF
oris r4, r4, 0x40
mtspr CTRLT, r4
mtspr SPRN_CTRLT, r4
blr
#endif


@ -852,6 +852,28 @@ static int __init iSeries_src_init(void)
late_initcall(iSeries_src_init);
static int set_spread_lpevents(char *str)
{
unsigned long i;
unsigned long val = simple_strtoul(str, NULL, 0);
/*
* The parameter is the number of processors to share in processing
* lp events.
*/
if (( val > 0) && (val <= NR_CPUS)) {
for (i = 1; i < val; ++i)
paca[i].lpqueue_ptr = paca[0].lpqueue_ptr;
printk("lpevent processing spread over %ld processors\n", val);
} else {
printk("invalid spread_lpevents %ld\n", val);
}
return 1;
}
__setup("spread_lpevents=", set_spread_lpevents);
void __init iSeries_early_setup(void)
{
iSeries_fixup_klimit();


@ -75,13 +75,9 @@ static int iSeries_idle(void)
{
struct paca_struct *lpaca;
long oldval;
unsigned long CTRL;
/* ensure iSeries run light will be out when idle */
clear_thread_flag(TIF_RUN_LIGHT);
CTRL = mfspr(CTRLF);
CTRL &= ~RUNLATCH;
mtspr(CTRLT, CTRL);
ppc64_runlatch_off();
lpaca = get_paca();
@ -111,7 +107,9 @@ static int iSeries_idle(void)
}
}
ppc64_runlatch_on();
schedule();
ppc64_runlatch_off();
}
return 0;


@ -47,14 +47,6 @@ static void remove_node_proc_entries(struct device_node *np)
remove_proc_entry(pp->name, np->pde);
pp = pp->next;
}
/* Assuming that symlinks have the same parent directory as
* np->pde.
*/
if (np->name_link)
remove_proc_entry(np->name_link->name, parent->pde);
if (np->addr_link)
remove_proc_entry(np->addr_link->name, parent->pde);
if (np->pde)
remove_proc_entry(np->pde->name, parent->pde);
}


@ -378,9 +378,6 @@ copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
childregs->gpr[1] = sp + sizeof(struct pt_regs);
p->thread.regs = NULL; /* no user register state */
clear_ti_thread_flag(p->thread_info, TIF_32BIT);
#ifdef CONFIG_PPC_ISERIES
set_ti_thread_flag(p->thread_info, TIF_RUN_LIGHT);
#endif
} else {
childregs->gpr[1] = usp;
p->thread.regs = childregs;


@ -211,13 +211,23 @@ struct {
*/
#define ADDR(x) (u32) ((unsigned long)(x) - offset)
/*
* Error results ... some OF calls will return "-1" on error, some
* will return 0, some will return either. To simplify, here are
* macros to use with any ihandle or phandle return value to check if
* it is valid
*/
#define PROM_ERROR (-1u)
#define PHANDLE_VALID(p) ((p) != 0 && (p) != PROM_ERROR)
#define IHANDLE_VALID(i) ((i) != 0 && (i) != PROM_ERROR)
/* This is the one and *ONLY* place where we actually call open
* firmware from, since we need to make sure we're running in 32b
* mode when we do. We switch back to 64b mode upon return.
*/
#define PROM_ERROR (-1)
static int __init call_prom(const char *service, int nargs, int nret, ...)
{
int i;
@ -587,14 +597,13 @@ static void __init prom_send_capabilities(void)
{
unsigned long offset = reloc_offset();
ihandle elfloader;
int ret;
elfloader = call_prom("open", 1, 1, ADDR("/packages/elf-loader"));
if (elfloader == 0) {
prom_printf("couldn't open /packages/elf-loader\n");
return;
}
ret = call_prom("call-method", 3, 1, ADDR("process-elf-header"),
call_prom("call-method", 3, 1, ADDR("process-elf-header"),
elfloader, ADDR(&fake_elf));
call_prom("close", 1, 0, elfloader);
}
@ -646,7 +655,7 @@ static unsigned long __init alloc_up(unsigned long size, unsigned long align)
base = _ALIGN_UP(base + 0x100000, align)) {
prom_debug(" trying: 0x%x\n\r", base);
addr = (unsigned long)prom_claim(base, size, 0);
if ((int)addr != PROM_ERROR)
if (addr != PROM_ERROR)
break;
addr = 0;
if (align == 0)
@ -708,7 +717,7 @@ static unsigned long __init alloc_down(unsigned long size, unsigned long align,
for(; base > RELOC(alloc_bottom); base = _ALIGN_DOWN(base - 0x100000, align)) {
prom_debug(" trying: 0x%x\n\r", base);
addr = (unsigned long)prom_claim(base, size, 0);
if ((int)addr != PROM_ERROR)
if (addr != PROM_ERROR)
break;
addr = 0;
}
@ -902,18 +911,19 @@ static void __init prom_instantiate_rtas(void)
{
unsigned long offset = reloc_offset();
struct prom_t *_prom = PTRRELOC(&prom);
phandle prom_rtas, rtas_node;
phandle rtas_node;
ihandle rtas_inst;
u32 base, entry = 0;
u32 size = 0;
prom_debug("prom_instantiate_rtas: start...\n");
prom_rtas = call_prom("finddevice", 1, 1, ADDR("/rtas"));
prom_debug("prom_rtas: %x\n", prom_rtas);
if (prom_rtas == (phandle) -1)
rtas_node = call_prom("finddevice", 1, 1, ADDR("/rtas"));
prom_debug("rtas_node: %x\n", rtas_node);
if (!PHANDLE_VALID(rtas_node))
return;
prom_getprop(prom_rtas, "rtas-size", &size, sizeof(size));
prom_getprop(rtas_node, "rtas-size", &size, sizeof(size));
if (size == 0)
return;
@ -922,14 +932,18 @@ static void __init prom_instantiate_rtas(void)
prom_printf("RTAS allocation failed !\n");
return;
}
prom_printf("instantiating rtas at 0x%x", base);
rtas_node = call_prom("open", 1, 1, ADDR("/rtas"));
prom_printf("...");
rtas_inst = call_prom("open", 1, 1, ADDR("/rtas"));
if (!IHANDLE_VALID(rtas_inst)) {
prom_printf("opening rtas package failed");
return;
}
prom_printf("instantiating rtas at 0x%x ...", base);
if (call_prom("call-method", 3, 2,
ADDR("instantiate-rtas"),
rtas_node, base) != PROM_ERROR) {
rtas_inst, base) != PROM_ERROR) {
entry = (long)_prom->args.rets[1];
}
if (entry == 0) {
@ -940,8 +954,8 @@ static void __init prom_instantiate_rtas(void)
reserve_mem(base, size);
prom_setprop(prom_rtas, "linux,rtas-base", &base, sizeof(base));
prom_setprop(prom_rtas, "linux,rtas-entry", &entry, sizeof(entry));
prom_setprop(rtas_node, "linux,rtas-base", &base, sizeof(base));
prom_setprop(rtas_node, "linux,rtas-entry", &entry, sizeof(entry));
prom_debug("rtas base = 0x%x\n", base);
prom_debug("rtas entry = 0x%x\n", entry);
@ -1062,7 +1076,7 @@ static void __init prom_initialize_tce_table(void)
prom_printf("opening PHB %s", path);
phb_node = call_prom("open", 1, 1, path);
if ( (long)phb_node <= 0)
if (phb_node == 0)
prom_printf("... failed\n");
else
prom_printf("... done\n");
@ -1279,12 +1293,12 @@ static void __init prom_init_client_services(unsigned long pp)
/* get a handle for the stdout device */
_prom->chosen = call_prom("finddevice", 1, 1, ADDR("/chosen"));
if ((long)_prom->chosen <= 0)
if (!PHANDLE_VALID(_prom->chosen))
prom_panic("cannot find chosen"); /* msg won't be printed :( */
/* get device tree root */
_prom->root = call_prom("finddevice", 1, 1, ADDR("/"));
if ((long)_prom->root <= 0)
if (!PHANDLE_VALID(_prom->root))
prom_panic("cannot find device tree root"); /* msg won't be printed :( */
}
@ -1356,9 +1370,8 @@ static int __init prom_find_machine_type(void)
}
/* Default to pSeries. We need to know if we are running LPAR */
rtas = call_prom("finddevice", 1, 1, ADDR("/rtas"));
if (rtas != (phandle) -1) {
unsigned long x;
x = prom_getproplen(rtas, "ibm,hypertas-functions");
if (PHANDLE_VALID(rtas)) {
int x = prom_getproplen(rtas, "ibm,hypertas-functions");
if (x != PROM_ERROR) {
prom_printf("Hypertas detected, assuming LPAR !\n");
return PLATFORM_PSERIES_LPAR;
@ -1426,12 +1439,13 @@ static void __init prom_check_displays(void)
* leave some room at the end of the path for appending extra
* arguments
*/
if (call_prom("package-to-path", 3, 1, node, path, PROM_SCRATCH_SIZE-10) < 0)
if (call_prom("package-to-path", 3, 1, node, path,
PROM_SCRATCH_SIZE-10) == PROM_ERROR)
continue;
prom_printf("found display : %s, opening ... ", path);
ih = call_prom("open", 1, 1, path);
if (ih == (ihandle)0 || ih == (ihandle)-1) {
if (ih == 0) {
prom_printf("failed\n");
continue;
}
@ -1514,6 +1528,12 @@ static unsigned long __init dt_find_string(char *str)
return 0;
}
/*
* The Open Firmware 1275 specification states properties must be 31 bytes or
* less, however not all firmwares obey this. Make it 64 bytes to be safe.
*/
#define MAX_PROPERTY_NAME 64
static void __init scan_dt_build_strings(phandle node, unsigned long *mem_start,
unsigned long *mem_end)
{
@ -1527,10 +1547,12 @@ static void __init scan_dt_build_strings(phandle node, unsigned long *mem_start,
/* get and store all property names */
prev_name = RELOC("");
for (;;) {
/* 32 is max len of name including nul. */
namep = make_room(mem_start, mem_end, 32, 1);
if (call_prom("nextprop", 3, 1, node, prev_name, namep) <= 0) {
int rc;
/* 64 is max len of name including nul. */
namep = make_room(mem_start, mem_end, MAX_PROPERTY_NAME, 1);
rc = call_prom("nextprop", 3, 1, node, prev_name, namep);
if (rc != 1) {
/* No more nodes: unwind alloc */
*mem_start = (unsigned long)namep;
break;
@ -1555,18 +1577,12 @@ static void __init scan_dt_build_strings(phandle node, unsigned long *mem_start,
}
}
/*
* The Open Firmware 1275 specification states properties must be 31 bytes or
* less, however not all firmwares obey this. Make it 64 bytes to be safe.
*/
#define MAX_PROPERTY_NAME 64
static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start,
unsigned long *mem_end)
{
int l, align;
phandle child;
char *namep, *prev_name, *sstart;
char *namep, *prev_name, *sstart, *p, *ep;
unsigned long soff;
unsigned char *valp;
unsigned long offset = reloc_offset();
@ -1588,6 +1604,14 @@ static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start,
call_prom("package-to-path", 3, 1, node, namep, l);
}
namep[l] = '\0';
/* Fixup an Apple bug where they have bogus \0 chars in the
* middle of the path in some properties
*/
for (p = namep, ep = namep + l; p < ep; p++)
if (*p == '\0') {
memmove(p, p+1, ep - p);
ep--; l--;
}
*mem_start = _ALIGN(((unsigned long) namep) + strlen(namep) + 1, 4);
}
@ -1599,7 +1623,10 @@ static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start,
prev_name = RELOC("");
sstart = (char *)RELOC(dt_string_start);
for (;;) {
if (call_prom("nextprop", 3, 1, node, prev_name, pname) <= 0)
int rc;
rc = call_prom("nextprop", 3, 1, node, prev_name, pname);
if (rc != 1)
break;
/* find string offset */
@ -1615,7 +1642,7 @@ static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start,
l = call_prom("getproplen", 2, 1, node, pname);
/* sanity checks */
if (l < 0)
if (l == PROM_ERROR)
continue;
if (l > MAX_PROPERTY_LENGTH) {
prom_printf("WARNING: ignoring large property ");
@ -1763,17 +1790,18 @@ static void __init fixup_device_tree(void)
/* Some G5s have a missing interrupt definition, fix it up here */
u3 = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000"));
if ((long)u3 <= 0)
if (!PHANDLE_VALID(u3))
return;
i2c = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/i2c@f8001000"));
if ((long)i2c <= 0)
if (!PHANDLE_VALID(i2c))
return;
mpic = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/mpic@f8040000"));
if ((long)mpic <= 0)
if (!PHANDLE_VALID(mpic))
return;
/* check if proper rev of u3 */
if (prom_getprop(u3, "device-rev", &u3_rev, sizeof(u3_rev)) <= 0)
if (prom_getprop(u3, "device-rev", &u3_rev, sizeof(u3_rev))
== PROM_ERROR)
return;
if (u3_rev != 0x35)
return;
@ -1880,6 +1908,12 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4, unsigned long
prom_setprop(_prom->chosen, "linux,platform",
&getprop_rval, sizeof(getprop_rval));
/*
* On pSeries, inform the firmware about our capabilities
*/
if (RELOC(of_platform) & PLATFORM_PSERIES)
prom_send_capabilities();
/*
* On pSeries, copy the CPU hold code
*/

View file

@ -103,11 +103,6 @@ extern void unflatten_device_tree(void);
extern void smp_release_cpus(void);
unsigned long decr_overclock = 1;
unsigned long decr_overclock_proc0 = 1;
unsigned long decr_overclock_set = 0;
unsigned long decr_overclock_proc0_set = 0;
int have_of = 1;
int boot_cpuid = 0;
int boot_cpuid_phys = 0;
@ -1120,64 +1115,15 @@ void ppc64_dump_msg(unsigned int src, const char *msg)
printk("[dump]%04x %s\n", src, msg);
}
int set_spread_lpevents( char * str )
{
/* The parameter is the number of processors to share in processing lp events */
unsigned long i;
unsigned long val = simple_strtoul( str, NULL, 0 );
if ( ( val > 0 ) && ( val <= NR_CPUS ) ) {
for ( i=1; i<val; ++i )
paca[i].lpqueue_ptr = paca[0].lpqueue_ptr;
printk("lpevent processing spread over %ld processors\n", val);
}
else
printk("invalid spreaqd_lpevents %ld\n", val);
return 1;
}
/* This should only be called on processor 0 during calibrate decr */
void setup_default_decr(void)
{
struct paca_struct *lpaca = get_paca();
if ( decr_overclock_set && !decr_overclock_proc0_set )
decr_overclock_proc0 = decr_overclock;
lpaca->default_decr = tb_ticks_per_jiffy / decr_overclock_proc0;
lpaca->default_decr = tb_ticks_per_jiffy;
lpaca->next_jiffy_update_tb = get_tb() + tb_ticks_per_jiffy;
}
int set_decr_overclock_proc0( char * str )
{
unsigned long val = simple_strtoul( str, NULL, 0 );
if ( ( val >= 1 ) && ( val <= 48 ) ) {
decr_overclock_proc0_set = 1;
decr_overclock_proc0 = val;
printk("proc 0 decrementer overclock factor of %ld\n", val);
}
else
printk("invalid proc 0 decrementer overclock factor of %ld\n", val);
return 1;
}
int set_decr_overclock( char * str )
{
unsigned long val = simple_strtoul( str, NULL, 0 );
if ( ( val >= 1 ) && ( val <= 48 ) ) {
decr_overclock_set = 1;
decr_overclock = val;
printk("decrementer overclock factor of %ld\n", val);
}
else
printk("invalid decrementer overclock factor of %ld\n", val);
return 1;
}
__setup("spread_lpevents=", set_spread_lpevents );
__setup("decr_overclock_proc0=", set_decr_overclock_proc0 );
__setup("decr_overclock=", set_decr_overclock );
#ifndef CONFIG_PPC_ISERIES
/*
* This function can be used by platforms to "find" legacy serial ports.

View file

@ -334,7 +334,6 @@ void smp_call_function_interrupt(void)
}
}
extern unsigned long decr_overclock;
extern struct gettimeofday_struct do_gtod;
struct thread_info *current_set[NR_CPUS];
@ -491,7 +490,7 @@ int __devinit __cpu_up(unsigned int cpu)
if (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu))
return -EINVAL;
paca[cpu].default_decr = tb_ticks_per_jiffy / decr_overclock;
paca[cpu].default_decr = tb_ticks_per_jiffy;
if (!cpu_has_feature(CPU_FTR_SLB)) {
void *tmp;

View file

@ -113,7 +113,6 @@ void ppc64_enable_pmcs(void)
#ifdef CONFIG_PPC_PSERIES
unsigned long set, reset;
int ret;
unsigned int ctrl;
#endif /* CONFIG_PPC_PSERIES */
/* Only need to enable them once */
@ -167,11 +166,8 @@ void ppc64_enable_pmcs(void)
* On SMT machines we have to set the run latch in the ctrl register
* in order to make PMC6 spin.
*/
if (cpu_has_feature(CPU_FTR_SMT)) {
ctrl = mfspr(CTRLF);
ctrl |= RUNLATCH;
mtspr(CTRLT, ctrl);
}
if (cpu_has_feature(CPU_FTR_SMT))
ppc64_runlatch_on();
#endif /* CONFIG_PPC_PSERIES */
}

View file

@ -325,9 +325,7 @@ int timer_interrupt(struct pt_regs * regs)
irq_enter();
#ifndef CONFIG_PPC_ISERIES
profile_tick(CPU_PROFILING, regs);
#endif
lpaca->lppaca.int_dword.fields.decr_int = 0;

View file

@ -28,6 +28,7 @@
//#include <linux/kernel_stat.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/workqueue.h>
#include "appldata.h"
@ -133,9 +134,12 @@ static int appldata_interval = APPLDATA_CPU_INTERVAL;
static int appldata_timer_active;
/*
* Tasklet
* Work queue
*/
static struct tasklet_struct appldata_tasklet_struct;
static struct workqueue_struct *appldata_wq;
static void appldata_work_fn(void *data);
static DECLARE_WORK(appldata_work, appldata_work_fn, NULL);
/*
* Ops list
@ -144,11 +148,11 @@ static DEFINE_SPINLOCK(appldata_ops_lock);
static LIST_HEAD(appldata_ops_list);
/************************* timer, tasklet, DIAG ******************************/
/*************************** timer, work, DIAG *******************************/
/*
* appldata_timer_function()
*
* schedule tasklet and reschedule timer
* schedule work and reschedule timer
*/
static void appldata_timer_function(unsigned long data, struct pt_regs *regs)
{
@ -157,22 +161,22 @@ static void appldata_timer_function(unsigned long data, struct pt_regs *regs)
atomic_read(&appldata_expire_count));
if (atomic_dec_and_test(&appldata_expire_count)) {
atomic_set(&appldata_expire_count, num_online_cpus());
tasklet_schedule((struct tasklet_struct *) data);
queue_work(appldata_wq, (struct work_struct *) data);
}
}
/*
* appldata_tasklet_function()
* appldata_work_fn()
*
* call data gathering function for each (active) module
*/
static void appldata_tasklet_function(unsigned long data)
static void appldata_work_fn(void *data)
{
struct list_head *lh;
struct appldata_ops *ops;
int i;
P_DEBUG(" -= Tasklet =-\n");
P_DEBUG(" -= Work Queue =-\n");
i = 0;
spin_lock(&appldata_ops_lock);
list_for_each(lh, &appldata_ops_list) {
@ -231,7 +235,7 @@ static int appldata_diag(char record_nr, u16 function, unsigned long buffer,
: "=d" (ry) : "d" (&(appldata_parameter_list)) : "cc");
return (int) ry;
}
/********************** timer, tasklet, DIAG <END> ***************************/
/************************ timer, work, DIAG <END> ****************************/
/****************************** /proc stuff **********************************/
@ -411,7 +415,7 @@ appldata_generic_handler(ctl_table *ctl, int write, struct file *filp,
struct list_head *lh;
found = 0;
spin_lock_bh(&appldata_ops_lock);
spin_lock(&appldata_ops_lock);
list_for_each(lh, &appldata_ops_list) {
tmp_ops = list_entry(lh, struct appldata_ops, list);
if (&tmp_ops->ctl_table[2] == ctl) {
@ -419,15 +423,15 @@ appldata_generic_handler(ctl_table *ctl, int write, struct file *filp,
}
}
if (!found) {
spin_unlock_bh(&appldata_ops_lock);
spin_unlock(&appldata_ops_lock);
return -ENODEV;
}
ops = ctl->data;
if (!try_module_get(ops->owner)) { // protect this function
spin_unlock_bh(&appldata_ops_lock);
spin_unlock(&appldata_ops_lock);
return -ENODEV;
}
spin_unlock_bh(&appldata_ops_lock);
spin_unlock(&appldata_ops_lock);
if (!*lenp || *ppos) {
*lenp = 0;
@ -451,10 +455,11 @@ appldata_generic_handler(ctl_table *ctl, int write, struct file *filp,
return -EFAULT;
}
spin_lock_bh(&appldata_ops_lock);
spin_lock(&appldata_ops_lock);
if ((buf[0] == '1') && (ops->active == 0)) {
if (!try_module_get(ops->owner)) { // protect tasklet
spin_unlock_bh(&appldata_ops_lock);
// protect work queue callback
if (!try_module_get(ops->owner)) {
spin_unlock(&appldata_ops_lock);
module_put(ops->owner);
return -ENODEV;
}
@ -485,7 +490,7 @@ appldata_generic_handler(ctl_table *ctl, int write, struct file *filp,
}
module_put(ops->owner);
}
spin_unlock_bh(&appldata_ops_lock);
spin_unlock(&appldata_ops_lock);
out:
*lenp = len;
*ppos += len;
@ -529,7 +534,7 @@ int appldata_register_ops(struct appldata_ops *ops)
}
memset(ops->ctl_table, 0, 4*sizeof(struct ctl_table));
spin_lock_bh(&appldata_ops_lock);
spin_lock(&appldata_ops_lock);
list_for_each(lh, &appldata_ops_list) {
tmp_ops = list_entry(lh, struct appldata_ops, list);
P_DEBUG("register_ops loop: %i) name = %s, ctl = %i\n",
@ -541,18 +546,18 @@ int appldata_register_ops(struct appldata_ops *ops)
APPLDATA_PROC_NAME_LENGTH) == 0) {
P_ERROR("Name \"%s\" already registered!\n", ops->name);
kfree(ops->ctl_table);
spin_unlock_bh(&appldata_ops_lock);
spin_unlock(&appldata_ops_lock);
return -EBUSY;
}
if (tmp_ops->ctl_nr == ops->ctl_nr) {
P_ERROR("ctl_nr %i already registered!\n", ops->ctl_nr);
kfree(ops->ctl_table);
spin_unlock_bh(&appldata_ops_lock);
spin_unlock(&appldata_ops_lock);
return -EBUSY;
}
}
list_add(&ops->list, &appldata_ops_list);
spin_unlock_bh(&appldata_ops_lock);
spin_unlock(&appldata_ops_lock);
ops->ctl_table[0].ctl_name = CTL_APPLDATA;
ops->ctl_table[0].procname = appldata_proc_name;
@ -583,12 +588,12 @@ int appldata_register_ops(struct appldata_ops *ops)
*/
void appldata_unregister_ops(struct appldata_ops *ops)
{
spin_lock_bh(&appldata_ops_lock);
spin_lock(&appldata_ops_lock);
unregister_sysctl_table(ops->sysctl_header);
list_del(&ops->list);
kfree(ops->ctl_table);
ops->ctl_table = NULL;
spin_unlock_bh(&appldata_ops_lock);
spin_unlock(&appldata_ops_lock);
P_INFO("%s-ops unregistered!\n", ops->name);
}
/********************** module-ops management <END> **************************/
@ -602,7 +607,7 @@ appldata_online_cpu(int cpu)
init_virt_timer(&per_cpu(appldata_timer, cpu));
per_cpu(appldata_timer, cpu).function = appldata_timer_function;
per_cpu(appldata_timer, cpu).data = (unsigned long)
&appldata_tasklet_struct;
&appldata_work;
atomic_inc(&appldata_expire_count);
spin_lock(&appldata_timer_lock);
__appldata_vtimer_setup(APPLDATA_MOD_TIMER);
@ -615,7 +620,7 @@ appldata_offline_cpu(int cpu)
del_virt_timer(&per_cpu(appldata_timer, cpu));
if (atomic_dec_and_test(&appldata_expire_count)) {
atomic_set(&appldata_expire_count, num_online_cpus());
tasklet_schedule(&appldata_tasklet_struct);
queue_work(appldata_wq, &appldata_work);
}
spin_lock(&appldata_timer_lock);
__appldata_vtimer_setup(APPLDATA_MOD_TIMER);
@ -648,7 +653,7 @@ static struct notifier_block __devinitdata appldata_nb = {
/*
* appldata_init()
*
* init timer and tasklet, register /proc entries
* init timer, register /proc entries
*/
static int __init appldata_init(void)
{
@ -657,6 +662,12 @@ static int __init appldata_init(void)
P_DEBUG("sizeof(parameter_list) = %lu\n",
sizeof(struct appldata_parameter_list));
appldata_wq = create_singlethread_workqueue("appldata");
if (!appldata_wq) {
P_ERROR("Could not create work queue\n");
return -ENOMEM;
}
for_each_online_cpu(i)
appldata_online_cpu(i);
@ -670,7 +681,6 @@ static int __init appldata_init(void)
appldata_table[1].de->owner = THIS_MODULE;
#endif
tasklet_init(&appldata_tasklet_struct, appldata_tasklet_function, 0);
P_DEBUG("Base interface initialized.\n");
return 0;
}
@ -678,7 +688,7 @@ static int __init appldata_init(void)
/*
* appldata_exit()
*
* stop timer and tasklet, unregister /proc entries
* stop timer, unregister /proc entries
*/
static void __exit appldata_exit(void)
{
@ -690,7 +700,7 @@ static void __exit appldata_exit(void)
/*
* ops list should be empty, but just in case something went wrong...
*/
spin_lock_bh(&appldata_ops_lock);
spin_lock(&appldata_ops_lock);
list_for_each(lh, &appldata_ops_list) {
ops = list_entry(lh, struct appldata_ops, list);
rc = appldata_diag(ops->record_nr, APPLDATA_STOP_REC,
@ -700,7 +710,7 @@ static void __exit appldata_exit(void)
"return code: %d\n", ops->name, rc);
}
}
spin_unlock_bh(&appldata_ops_lock);
spin_unlock(&appldata_ops_lock);
for_each_online_cpu(i)
appldata_offline_cpu(i);
@ -709,7 +719,7 @@ static void __exit appldata_exit(void)
unregister_sysctl_table(appldata_sysctl_header);
tasklet_kill(&appldata_tasklet_struct);
destroy_workqueue(appldata_wq);
P_DEBUG("... module unloaded!\n");
}
/**************************** init / exit <END> ******************************/

View file

@ -68,7 +68,7 @@ struct appldata_mem_data {
u64 pgmajfault; /* page faults (major only) */
// <-- New in 2.6
} appldata_mem_data;
} __attribute__((packed)) appldata_mem_data;
static inline void appldata_debug_print(struct appldata_mem_data *mem_data)

View file

@ -57,7 +57,7 @@ struct appldata_net_sum_data {
u64 rx_dropped; /* no space in linux buffers */
u64 tx_dropped; /* no space available in linux */
u64 collisions; /* collisions while transmitting */
} appldata_net_sum_data;
} __attribute__((packed)) appldata_net_sum_data;
static inline void appldata_print_debug(struct appldata_net_sum_data *net_data)

View file

@ -49,7 +49,7 @@ struct appldata_os_per_cpu {
u32 per_cpu_softirq; /* ... spent in softirqs */
u32 per_cpu_iowait; /* ... spent while waiting for I/O */
// <-- New in 2.6
};
} __attribute__((packed));
struct appldata_os_data {
u64 timestamp;
@ -75,7 +75,7 @@ struct appldata_os_data {
/* per cpu data */
struct appldata_os_per_cpu os_cpu[0];
};
} __attribute__((packed));
static struct appldata_os_data *appldata_os_data;

View file

@ -40,6 +40,7 @@
#include <asm/pgalloc.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#ifdef CONFIG_S390_SUPPORT
#include "compat_ptrace.h"
@ -130,13 +131,19 @@ static int
peek_user(struct task_struct *child, addr_t addr, addr_t data)
{
struct user *dummy = NULL;
addr_t offset, tmp;
addr_t offset, tmp, mask;
/*
* Stupid gdb peeks/pokes the access registers in 64 bit with
* an alignment of 4. Programmers from hell...
*/
if ((addr & 3) || addr > sizeof(struct user) - __ADDR_MASK)
mask = __ADDR_MASK;
#ifdef CONFIG_ARCH_S390X
if (addr >= (addr_t) &dummy->regs.acrs &&
addr < (addr_t) &dummy->regs.orig_gpr2)
mask = 3;
#endif
if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
return -EIO;
if (addr < (addr_t) &dummy->regs.acrs) {
@ -153,6 +160,16 @@ peek_user(struct task_struct *child, addr_t addr, addr_t data)
* access registers are stored in the thread structure
*/
offset = addr - (addr_t) &dummy->regs.acrs;
#ifdef CONFIG_ARCH_S390X
/*
* Very special case: old & broken 64 bit gdb reading
* from acrs[15]. Result is a 64 bit value. Read the
* 32 bit acrs[15] value and shift it by 32. Sick...
*/
if (addr == (addr_t) &dummy->regs.acrs[15])
tmp = ((unsigned long) child->thread.acrs[15]) << 32;
else
#endif
tmp = *(addr_t *)((addr_t) &child->thread.acrs + offset);
} else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
@ -167,6 +184,9 @@ peek_user(struct task_struct *child, addr_t addr, addr_t data)
*/
offset = addr - (addr_t) &dummy->regs.fp_regs;
tmp = *(addr_t *)((addr_t) &child->thread.fp_regs + offset);
if (addr == (addr_t) &dummy->regs.fp_regs.fpc)
tmp &= (unsigned long) FPC_VALID_MASK
<< (BITS_PER_LONG - 32);
} else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
/*
@ -191,13 +211,19 @@ static int
poke_user(struct task_struct *child, addr_t addr, addr_t data)
{
struct user *dummy = NULL;
addr_t offset;
addr_t offset, mask;
/*
* Stupid gdb peeks/pokes the access registers in 64 bit with
* an alignment of 4. Programmers from hell indeed...
*/
if ((addr & 3) || addr > sizeof(struct user) - __ADDR_MASK)
mask = __ADDR_MASK;
#ifdef CONFIG_ARCH_S390X
if (addr >= (addr_t) &dummy->regs.acrs &&
addr < (addr_t) &dummy->regs.orig_gpr2)
mask = 3;
#endif
if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
return -EIO;
if (addr < (addr_t) &dummy->regs.acrs) {
@ -224,6 +250,17 @@ poke_user(struct task_struct *child, addr_t addr, addr_t data)
* access registers are stored in the thread structure
*/
offset = addr - (addr_t) &dummy->regs.acrs;
#ifdef CONFIG_ARCH_S390X
/*
* Very special case: old & broken 64 bit gdb writing
* to acrs[15] with a 64 bit value. Ignore the lower
* half of the value and write the upper 32 bit to
* acrs[15]. Sick...
*/
if (addr == (addr_t) &dummy->regs.acrs[15])
child->thread.acrs[15] = (unsigned int) (data >> 32);
else
#endif
*(addr_t *)((addr_t) &child->thread.acrs + offset) = data;
} else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
@ -237,7 +274,8 @@ poke_user(struct task_struct *child, addr_t addr, addr_t data)
* floating point regs. are stored in the thread structure
*/
if (addr == (addr_t) &dummy->regs.fp_regs.fpc &&
(data & ~FPC_VALID_MASK) != 0)
(data & ~((unsigned long) FPC_VALID_MASK
<< (BITS_PER_LONG - 32))) != 0)
return -EINVAL;
offset = addr - (addr_t) &dummy->regs.fp_regs;
*(addr_t *)((addr_t) &child->thread.fp_regs + offset) = data;
@ -722,6 +760,13 @@ syscall_trace(struct pt_regs *regs, int entryexit)
ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
? 0x80 : 0));
/*
* If the debugger has set an invalid system call number,
* we prepare to skip the system call restart handling.
*/
if (!entryexit && regs->gprs[2] >= NR_syscalls)
regs->trap = -1;
/*
* this isn't the same as continuing with a signal, but it will do
* for normal use. strace only continues with a signal if the

View file

@ -207,7 +207,7 @@ do_exception(struct pt_regs *regs, unsigned long error_code, int is_protection)
* we are not in an interrupt and that there is a
* user context.
*/
if (user_address == 0 || in_interrupt() || !mm)
if (user_address == 0 || in_atomic() || !mm)
goto no_context;
/*

View file

@ -196,6 +196,34 @@ static iopte_t *alloc_consistent_cluster(struct pci_iommu *iommu, unsigned long
return NULL;
}
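/* Allocate a streaming-cache context number.  Context 0 is treated as
 * "no context": the bitmap search starts at ctx_lowest_free, wraps
 * around to slot 1, and only falls back to 0 once every context is in
 * use.
 */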
static int iommu_alloc_ctx(struct pci_iommu *iommu)
{
int lowest = iommu->ctx_lowest_free;
int sz = IOMMU_NUM_CTXS - lowest;
int n = find_next_zero_bit(iommu->ctx_bitmap, sz, lowest);
if (unlikely(n == sz)) {
n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1);
if (unlikely(n == lowest)) {
printk(KERN_WARNING "IOMMU: Ran out of contexts.\n");
n = 0;
}
}
if (n)
__set_bit(n, iommu->ctx_bitmap);
return n;
}
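/* Release a context and remember the lowest free slot so the next
 * allocation can start its bitmap search there.
 */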
static inline void iommu_free_ctx(struct pci_iommu *iommu, int ctx)
{
if (likely(ctx)) {
__clear_bit(ctx, iommu->ctx_bitmap);
if (ctx < iommu->ctx_lowest_free)
iommu->ctx_lowest_free = ctx;
}
}
/* Allocate and map kernel buffer of size SIZE using consistent mode
* DMA for PCI device PDEV. Return non-NULL cpu-side address if
* successful and set *DMA_ADDRP to the PCI side dma address.
@ -236,7 +264,7 @@ void *pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_ad
npages = size >> IO_PAGE_SHIFT;
ctx = 0;
if (iommu->iommu_ctxflush)
ctx = iommu->iommu_cur_ctx++;
ctx = iommu_alloc_ctx(iommu);
first_page = __pa(first_page);
while (npages--) {
iopte_val(*iopte) = (IOPTE_CONSISTENT(ctx) |
@ -317,6 +345,8 @@ void pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_
}
}
iommu_free_ctx(iommu, ctx);
spin_unlock_irqrestore(&iommu->lock, flags);
order = get_order(size);
@ -360,7 +390,7 @@ dma_addr_t pci_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direct
base_paddr = __pa(oaddr & IO_PAGE_MASK);
ctx = 0;
if (iommu->iommu_ctxflush)
ctx = iommu->iommu_cur_ctx++;
ctx = iommu_alloc_ctx(iommu);
if (strbuf->strbuf_enabled)
iopte_protection = IOPTE_STREAMING(ctx);
else
@ -380,39 +410,53 @@ bad:
return PCI_DMA_ERROR_CODE;
}
static void pci_strbuf_flush(struct pci_strbuf *strbuf, struct pci_iommu *iommu, u32 vaddr, unsigned long ctx, unsigned long npages)
static void pci_strbuf_flush(struct pci_strbuf *strbuf, struct pci_iommu *iommu, u32 vaddr, unsigned long ctx, unsigned long npages, int direction)
{
int limit;
PCI_STC_FLUSHFLAG_INIT(strbuf);
if (strbuf->strbuf_ctxflush &&
iommu->iommu_ctxflush) {
unsigned long matchreg, flushreg;
u64 val;
flushreg = strbuf->strbuf_ctxflush;
matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
limit = 100000;
pci_iommu_write(flushreg, ctx);
for(;;) {
if (((long)pci_iommu_read(matchreg)) >= 0L)
break;
limit--;
if (!limit)
break;
udelay(1);
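			/* The low 16 bits of the match register appear to flag
			 * streaming-buffer entries still tagged with this
			 * context; issue one context flush per flagged entry,
			 * then re-check the register below.
			 */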
val = pci_iommu_read(matchreg);
val &= 0xffff;
if (!val)
goto do_flush_sync;
while (val) {
if (val & 0x1)
pci_iommu_write(flushreg, ctx);
val >>= 1;
}
if (!limit)
val = pci_iommu_read(matchreg);
if (unlikely(val)) {
printk(KERN_WARNING "pci_strbuf_flush: ctx flush "
"timeout vaddr[%08x] ctx[%lx]\n",
vaddr, ctx);
"timeout matchreg[%lx] ctx[%lx]\n",
val, ctx);
goto do_page_flush;
}
} else {
unsigned long i;
do_page_flush:
for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
pci_iommu_write(strbuf->strbuf_pflush, vaddr);
}
do_flush_sync:
/* If the device could not have possibly put dirty data into
* the streaming cache, no flush-flag synchronization needs
* to be performed.
*/
if (direction == PCI_DMA_TODEVICE)
return;
PCI_STC_FLUSHFLAG_INIT(strbuf);
pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
(void) pci_iommu_read(iommu->write_complete_reg);
@ -466,7 +510,7 @@ void pci_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int
/* Step 1: Kick data out of streaming buffers if necessary. */
if (strbuf->strbuf_enabled)
pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages);
pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
/* Step 2: Clear out first TSB entry. */
iopte_make_dummy(iommu, base);
@ -474,6 +518,8 @@ void pci_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int
free_streaming_cluster(iommu, bus_addr - iommu->page_table_map_base,
npages, ctx);
iommu_free_ctx(iommu, ctx);
spin_unlock_irqrestore(&iommu->lock, flags);
}
@ -613,7 +659,7 @@ int pci_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int
/* Step 4: Choose a context if necessary. */
ctx = 0;
if (iommu->iommu_ctxflush)
ctx = iommu->iommu_cur_ctx++;
ctx = iommu_alloc_ctx(iommu);
/* Step 5: Create the mappings. */
if (strbuf->strbuf_enabled)
@ -678,7 +724,7 @@ void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems,
/* Step 1: Kick data out of streaming buffers if necessary. */
if (strbuf->strbuf_enabled)
pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages);
pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
/* Step 2: Clear out first TSB entry. */
iopte_make_dummy(iommu, base);
@ -686,6 +732,8 @@ void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems,
free_streaming_cluster(iommu, bus_addr - iommu->page_table_map_base,
npages, ctx);
iommu_free_ctx(iommu, ctx);
spin_unlock_irqrestore(&iommu->lock, flags);
}
@ -724,7 +772,7 @@ void pci_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size
}
/* Step 2: Kick data out of streaming buffers. */
pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages);
pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
spin_unlock_irqrestore(&iommu->lock, flags);
}
@ -768,7 +816,7 @@ void pci_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, i
i--;
npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length)
- bus_addr) >> IO_PAGE_SHIFT;
pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages);
pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
spin_unlock_irqrestore(&iommu->lock, flags);
}

View file

@ -1212,7 +1212,7 @@ static void __init psycho_iommu_init(struct pci_controller_info *p)
/* Setup initial software IOMMU state. */
spin_lock_init(&iommu->lock);
iommu->iommu_cur_ctx = 0;
iommu->ctx_lowest_free = 1;
/* Register addresses. */
iommu->iommu_control = p->pbm_A.controller_regs + PSYCHO_IOMMU_CONTROL;

View file

@ -1265,7 +1265,7 @@ static void __init sabre_iommu_init(struct pci_controller_info *p,
/* Setup initial software IOMMU state. */
spin_lock_init(&iommu->lock);
iommu->iommu_cur_ctx = 0;
iommu->ctx_lowest_free = 1;
/* Register addresses. */
iommu->iommu_control = p->pbm_A.controller_regs + SABRE_IOMMU_CONTROL;

View file

@ -1753,7 +1753,7 @@ static void schizo_pbm_iommu_init(struct pci_pbm_info *pbm)
/* Setup initial software IOMMU state. */
spin_lock_init(&iommu->lock);
iommu->iommu_cur_ctx = 0;
iommu->ctx_lowest_free = 1;
/* Register addresses, SCHIZO has iommu ctx flushing. */
iommu->iommu_control = pbm->pbm_regs + SCHIZO_IOMMU_CONTROL;

View file

@ -117,17 +117,25 @@ static void iommu_flush(struct sbus_iommu *iommu, u32 base, unsigned long npages
#define STRBUF_TAG_VALID 0x02UL
static void sbus_strbuf_flush(struct sbus_iommu *iommu, u32 base, unsigned long npages)
static void sbus_strbuf_flush(struct sbus_iommu *iommu, u32 base, unsigned long npages, int direction)
{
unsigned long n;
int limit;
iommu->strbuf_flushflag = 0UL;
n = npages;
while (n--)
upa_writeq(base + (n << IO_PAGE_SHIFT),
iommu->strbuf_regs + STRBUF_PFLUSH);
/* If the device could not have possibly put dirty data into
* the streaming cache, no flush-flag synchronization needs
* to be performed.
*/
if (direction == SBUS_DMA_TODEVICE)
return;
iommu->strbuf_flushflag = 0UL;
/* Whoopee cushion! */
upa_writeq(__pa(&iommu->strbuf_flushflag),
iommu->strbuf_regs + STRBUF_FSYNC);
@ -421,7 +429,7 @@ void sbus_unmap_single(struct sbus_dev *sdev, dma_addr_t dma_addr, size_t size,
spin_lock_irqsave(&iommu->lock, flags);
free_streaming_cluster(iommu, dma_base, size >> IO_PAGE_SHIFT);
sbus_strbuf_flush(iommu, dma_base, size >> IO_PAGE_SHIFT);
sbus_strbuf_flush(iommu, dma_base, size >> IO_PAGE_SHIFT, direction);
spin_unlock_irqrestore(&iommu->lock, flags);
}
@ -584,7 +592,7 @@ void sbus_unmap_sg(struct sbus_dev *sdev, struct scatterlist *sg, int nents, int
iommu = sdev->bus->iommu;
spin_lock_irqsave(&iommu->lock, flags);
free_streaming_cluster(iommu, dvma_base, size >> IO_PAGE_SHIFT);
sbus_strbuf_flush(iommu, dvma_base, size >> IO_PAGE_SHIFT);
sbus_strbuf_flush(iommu, dvma_base, size >> IO_PAGE_SHIFT, direction);
spin_unlock_irqrestore(&iommu->lock, flags);
}
@ -596,7 +604,7 @@ void sbus_dma_sync_single_for_cpu(struct sbus_dev *sdev, dma_addr_t base, size_t
size = (IO_PAGE_ALIGN(base + size) - (base & IO_PAGE_MASK));
spin_lock_irqsave(&iommu->lock, flags);
sbus_strbuf_flush(iommu, base & IO_PAGE_MASK, size >> IO_PAGE_SHIFT);
sbus_strbuf_flush(iommu, base & IO_PAGE_MASK, size >> IO_PAGE_SHIFT, direction);
spin_unlock_irqrestore(&iommu->lock, flags);
}
@ -620,7 +628,7 @@ void sbus_dma_sync_sg_for_cpu(struct sbus_dev *sdev, struct scatterlist *sg, int
size = IO_PAGE_ALIGN(sg[i].dma_address + sg[i].dma_length) - base;
spin_lock_irqsave(&iommu->lock, flags);
sbus_strbuf_flush(iommu, base, size >> IO_PAGE_SHIFT);
sbus_strbuf_flush(iommu, base, size >> IO_PAGE_SHIFT, direction);
spin_unlock_irqrestore(&iommu->lock, flags);
}

View file

@ -305,6 +305,7 @@ config HPET_TIMER
config X86_PM_TIMER
bool "PM timer"
depends on ACPI
default y
help
Support the ACPI PM timer for time keeping. This is slow,

View file

@ -37,6 +37,7 @@
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/mach_apic.h>
#include <asm/acpi.h>
#define __apicdebuginit __init

View file

@ -30,6 +30,7 @@
#include <asm/pgalloc.h>
#include <asm/io_apic.h>
#include <asm/proto.h>
#include <asm/acpi.h>
/* Have we found an MP table */
int smp_found_config;

View file

@ -27,7 +27,9 @@
#include <linux/bcd.h>
#include <linux/kallsyms.h>
#include <linux/acpi.h>
#ifdef CONFIG_ACPI
#include <acpi/achware.h> /* for PM timer frequency */
#endif
#include <asm/8253pit.h>
#include <asm/pgtable.h>
#include <asm/vsyscall.h>

View file

@ -39,7 +39,8 @@ ifeq ($(CONFIG_ATM_FORE200E_PCA),y)
fore_200e-objs += fore200e_pca_fw.o
# guess the target endianness to choose the right PCA-200E firmware image
ifeq ($(CONFIG_ATM_FORE200E_PCA_DEFAULT_FW),y)
CONFIG_ATM_FORE200E_PCA_FW = $(shell if test -n "`$(CC) -E -dM $(src)/../../include/asm/byteorder.h | grep ' __LITTLE_ENDIAN '`"; then echo $(obj)/pca200e.bin; else echo $(obj)/pca200e_ecd.bin2; fi)
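# byteorder.h lives under include2/ when building in a separate object
# tree (objtree != srctree); the firmware image suffix is then chosen by
# asking the preprocessor whether the target defines __LITTLE_ENDIAN.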
byteorder.h := include$(if $(patsubst $(srctree),,$(objtree)),2)/asm/byteorder.h
CONFIG_ATM_FORE200E_PCA_FW := $(obj)/pca200e$(if $(shell $(CC) -E -dM $(byteorder.h) | grep ' __LITTLE_ENDIAN '),.bin,_ecd.bin2)
endif
endif

View file

@ -383,8 +383,7 @@ fore200e_shutdown(struct fore200e* fore200e)
switch(fore200e->state) {
case FORE200E_STATE_COMPLETE:
if (fore200e->stats)
kfree(fore200e->stats);
kfree(fore200e->stats);
case FORE200E_STATE_IRQ:
free_irq(fore200e->irq, fore200e->atm_dev);
@ -963,8 +962,7 @@ fore200e_tx_irq(struct fore200e* fore200e)
entry, txq->tail, entry->vc_map, entry->skb);
/* free copy of misaligned data */
if (entry->data)
kfree(entry->data);
kfree(entry->data);
/* remove DMA mapping */
fore200e->bus->dma_unmap(fore200e, entry->tpd->tsd[ 0 ].buffer, entry->tpd->tsd[ 0 ].length,

View file

@ -412,8 +412,7 @@ he_init_one(struct pci_dev *pci_dev, const struct pci_device_id *pci_ent)
init_one_failure:
if (atm_dev)
atm_dev_deregister(atm_dev);
if (he_dev)
kfree(he_dev);
kfree(he_dev);
pci_disable_device(pci_dev);
return err;
}
@ -2534,8 +2533,7 @@ he_open(struct atm_vcc *vcc)
open_failed:
if (err) {
if (he_vcc)
kfree(he_vcc);
kfree(he_vcc);
clear_bit(ATM_VF_ADDR, &vcc->flags);
}
else

View file

@ -676,10 +676,10 @@ static int __devinit ns_init_card(int i, struct pci_dev *pcidev)
PRINTK("nicstar%d: RSQ base at 0x%x.\n", i, (u32) card->rsq.base);
/* Initialize SCQ0, the only VBR SCQ used */
card->scq1 = (scq_info *) NULL;
card->scq2 = (scq_info *) NULL;
card->scq1 = NULL;
card->scq2 = NULL;
card->scq0 = get_scq(VBR_SCQSIZE, NS_VRSCD0);
if (card->scq0 == (scq_info *) NULL)
if (card->scq0 == NULL)
{
printk("nicstar%d: can't get SCQ0.\n", i);
error = 12;
@ -993,24 +993,24 @@ static scq_info *get_scq(int size, u32 scd)
int i;
if (size != VBR_SCQSIZE && size != CBR_SCQSIZE)
return (scq_info *) NULL;
return NULL;
scq = (scq_info *) kmalloc(sizeof(scq_info), GFP_KERNEL);
if (scq == (scq_info *) NULL)
return (scq_info *) NULL;
if (scq == NULL)
return NULL;
scq->org = kmalloc(2 * size, GFP_KERNEL);
if (scq->org == NULL)
{
kfree(scq);
return (scq_info *) NULL;
return NULL;
}
scq->skb = (struct sk_buff **) kmalloc(sizeof(struct sk_buff *) *
(size / NS_SCQE_SIZE), GFP_KERNEL);
if (scq->skb == (struct sk_buff **) NULL)
if (scq->skb == NULL)
{
kfree(scq->org);
kfree(scq);
return (scq_info *) NULL;
return NULL;
}
scq->num_entries = size / NS_SCQE_SIZE;
scq->base = (ns_scqe *) ALIGN_ADDRESS(scq->org, size);
@ -1498,7 +1498,7 @@ static int ns_open(struct atm_vcc *vcc)
vc->cbr_scd = NS_FRSCD + frscdi * NS_FRSCD_SIZE;
scq = get_scq(CBR_SCQSIZE, vc->cbr_scd);
if (scq == (scq_info *) NULL)
if (scq == NULL)
{
PRINTK("nicstar%d: can't get fixed rate SCQ.\n", card->index);
card->scd2vc[frscdi] = NULL;

View file

@ -902,7 +902,7 @@ static void close_tx(struct atm_vcc *vcc)
zatm_dev->tx_bw += vcc->qos.txtp.min_pcr;
dealloc_shaper(vcc->dev,zatm_vcc->shaper);
}
if (zatm_vcc->ring) kfree(zatm_vcc->ring);
kfree(zatm_vcc->ring);
}
@ -1339,12 +1339,9 @@ static int __init zatm_start(struct atm_dev *dev)
return 0;
out:
for (i = 0; i < NR_MBX; i++)
if (zatm_dev->mbx_start[i] != 0)
kfree((void *) zatm_dev->mbx_start[i]);
if (zatm_dev->rx_map != NULL)
kfree(zatm_dev->rx_map);
if (zatm_dev->tx_map != NULL)
kfree(zatm_dev->tx_map);
kfree(zatm_dev->mbx_start[i]);
kfree(zatm_dev->rx_map);
kfree(zatm_dev->tx_map);
free_irq(zatm_dev->irq, dev);
return error;
}

File diff suppressed because it is too large

View file

@ -278,6 +278,8 @@ void agp3_generic_cleanup(void);
#define AGP_GENERIC_SIZES_ENTRIES 11
extern struct aper_size_info_16 agp3_generic_sizes[];
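/* Translate kernel virtual addresses to GART bus addresses and back.
 * On many platforms phys_to_gart()/gart_to_phys() are identity maps,
 * but routing translations through these helpers keeps drivers correct
 * on machines where bus and CPU physical addresses differ.
 */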
#define virt_to_gart(x) (phys_to_gart(virt_to_phys(x)))
#define gart_to_virt(x) (phys_to_virt(gart_to_phys(x)))
extern int agp_off;
extern int agp_try_unsupported_boot;

View file

@ -150,7 +150,7 @@ static void *m1541_alloc_page(struct agp_bridge_data *bridge)
pci_read_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, &temp);
pci_write_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL,
(((temp & ALI_CACHE_FLUSH_ADDR_MASK) |
virt_to_phys(addr)) | ALI_CACHE_FLUSH_EN ));
virt_to_gart(addr)) | ALI_CACHE_FLUSH_EN ));
return addr;
}
@ -174,7 +174,7 @@ static void m1541_destroy_page(void * addr)
pci_read_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, &temp);
pci_write_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL,
(((temp & ALI_CACHE_FLUSH_ADDR_MASK) |
virt_to_phys(addr)) | ALI_CACHE_FLUSH_EN));
virt_to_gart(addr)) | ALI_CACHE_FLUSH_EN));
agp_generic_destroy_page(addr);
}

View file

@ -43,7 +43,7 @@ static int amd_create_page_map(struct amd_page_map *page_map)
SetPageReserved(virt_to_page(page_map->real));
global_cache_flush();
page_map->remapped = ioremap_nocache(virt_to_phys(page_map->real),
page_map->remapped = ioremap_nocache(virt_to_gart(page_map->real),
PAGE_SIZE);
if (page_map->remapped == NULL) {
ClearPageReserved(virt_to_page(page_map->real));
@ -154,7 +154,7 @@ static int amd_create_gatt_table(struct agp_bridge_data *bridge)
agp_bridge->gatt_table_real = (u32 *)page_dir.real;
agp_bridge->gatt_table = (u32 __iomem *)page_dir.remapped;
agp_bridge->gatt_bus_addr = virt_to_phys(page_dir.real);
agp_bridge->gatt_bus_addr = virt_to_gart(page_dir.real);
/* Get the address for the gart region.
* This is a bus address even on the alpha, b/c its
@ -167,7 +167,7 @@ static int amd_create_gatt_table(struct agp_bridge_data *bridge)
/* Calculate the agp offset */
for (i = 0; i < value->num_entries / 1024; i++, addr += 0x00400000) {
writel(virt_to_phys(amd_irongate_private.gatt_pages[i]->real) | 1,
writel(virt_to_gart(amd_irongate_private.gatt_pages[i]->real) | 1,
page_dir.remapped+GET_PAGE_DIR_OFF(addr));
readl(page_dir.remapped+GET_PAGE_DIR_OFF(addr)); /* PCI Posting. */
}

View file

@ -219,7 +219,7 @@ static struct aper_size_info_32 amd_8151_sizes[7] =
static int amd_8151_configure(void)
{
unsigned long gatt_bus = virt_to_phys(agp_bridge->gatt_table_real);
unsigned long gatt_bus = virt_to_gart(agp_bridge->gatt_table_real);
/* Configure AGP regs in each x86-64 host bridge. */
for_each_nb() {
@ -591,7 +591,7 @@ static void __devexit agp_amd64_remove(struct pci_dev *pdev)
{
struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
release_mem_region(virt_to_phys(bridge->gatt_table_real),
release_mem_region(virt_to_gart(bridge->gatt_table_real),
amd64_aperture_sizes[bridge->aperture_size_idx].size);
agp_remove_bridge(bridge);
agp_put_bridge(bridge);

View file

@ -61,7 +61,7 @@ static int ati_create_page_map(ati_page_map *page_map)
SetPageReserved(virt_to_page(page_map->real));
err = map_page_into_agp(virt_to_page(page_map->real));
page_map->remapped = ioremap_nocache(virt_to_phys(page_map->real),
page_map->remapped = ioremap_nocache(virt_to_gart(page_map->real),
PAGE_SIZE);
if (page_map->remapped == NULL || err) {
ClearPageReserved(virt_to_page(page_map->real));
@ -343,7 +343,7 @@ static int ati_create_gatt_table(struct agp_bridge_data *bridge)
agp_bridge->gatt_table_real = (u32 *)page_dir.real;
agp_bridge->gatt_table = (u32 __iomem *) page_dir.remapped;
agp_bridge->gatt_bus_addr = virt_to_bus(page_dir.real);
agp_bridge->gatt_bus_addr = virt_to_gart(page_dir.real);
/* Write out the size register */
current_size = A_SIZE_LVL2(agp_bridge->current_size);
@ -373,7 +373,7 @@ static int ati_create_gatt_table(struct agp_bridge_data *bridge)
/* Calculate the agp offset */
for(i = 0; i < value->num_entries / 1024; i++, addr += 0x00400000) {
writel(virt_to_bus(ati_generic_private.gatt_pages[i]->real) | 1,
writel(virt_to_gart(ati_generic_private.gatt_pages[i]->real) | 1,
page_dir.remapped+GET_PAGE_DIR_OFF(addr));
readl(page_dir.remapped+GET_PAGE_DIR_OFF(addr)); /* PCI Posting. */
}

View file

@ -148,7 +148,7 @@ static int agp_backend_initialize(struct agp_bridge_data *bridge)
return -ENOMEM;
}
bridge->scratch_page_real = virt_to_phys(addr);
bridge->scratch_page_real = virt_to_gart(addr);
bridge->scratch_page =
bridge->driver->mask_memory(bridge, bridge->scratch_page_real, 0);
}
@ -189,7 +189,7 @@ static int agp_backend_initialize(struct agp_bridge_data *bridge)
err_out:
if (bridge->driver->needs_scratch_page)
bridge->driver->agp_destroy_page(
phys_to_virt(bridge->scratch_page_real));
gart_to_virt(bridge->scratch_page_real));
if (got_gatt)
bridge->driver->free_gatt_table(bridge);
if (got_keylist) {
@ -214,7 +214,7 @@ static void agp_backend_cleanup(struct agp_bridge_data *bridge)
if (bridge->driver->agp_destroy_page &&
bridge->driver->needs_scratch_page)
bridge->driver->agp_destroy_page(
phys_to_virt(bridge->scratch_page_real));
gart_to_virt(bridge->scratch_page_real));
}
/* When we remove the global variable agp_bridge from all drivers

View file

@ -219,7 +219,7 @@ static int efficeon_create_gatt_table(struct agp_bridge_data *bridge)
efficeon_private.l1_table[index] = page;
value = __pa(page) | pati | present | index;
value = virt_to_gart(page) | pati | present | index;
pci_write_config_dword(agp_bridge->dev,
EFFICEON_ATTPAGE, value);

View file

@ -153,7 +153,7 @@ void agp_free_memory(struct agp_memory *curr)
}
if (curr->page_count != 0) {
for (i = 0; i < curr->page_count; i++) {
curr->bridge->driver->agp_destroy_page(phys_to_virt(curr->memory[i]));
curr->bridge->driver->agp_destroy_page(gart_to_virt(curr->memory[i]));
}
}
agp_free_key(curr->key);
@ -209,7 +209,7 @@ struct agp_memory *agp_allocate_memory(struct agp_bridge_data *bridge,
agp_free_memory(new);
return NULL;
}
new->memory[i] = virt_to_phys(addr);
new->memory[i] = virt_to_gart(addr);
new->page_count++;
}
new->bridge = bridge;
@ -295,19 +295,6 @@ int agp_num_entries(void)
EXPORT_SYMBOL_GPL(agp_num_entries);
static int check_bridge_mode(struct pci_dev *dev)
{
u32 agp3;
u8 cap_ptr;
cap_ptr = pci_find_capability(dev, PCI_CAP_ID_AGP);
pci_read_config_dword(dev, cap_ptr+AGPSTAT, &agp3);
if (agp3 & AGPSTAT_MODE_3_0)
return 1;
return 0;
}
/**
* agp_copy_info - copy bridge state information
*
@ -328,7 +315,7 @@ int agp_copy_info(struct agp_bridge_data *bridge, struct agp_kern_info *info)
info->version.minor = bridge->version->minor;
info->chipset = SUPPORTED;
info->device = bridge->dev;
if (check_bridge_mode(bridge->dev))
if (bridge->mode & AGPSTAT_MODE_3_0)
info->mode = bridge->mode & ~AGP3_RESERVED_MASK;
else
info->mode = bridge->mode & ~AGP2_RESERVED_MASK;
@ -661,7 +648,7 @@ u32 agp_collect_device_status(struct agp_bridge_data *bridge, u32 requested_mode
bridge_agpstat &= ~AGPSTAT_FW;
/* Check to see if we are operating in 3.0 mode */
if (check_bridge_mode(agp_bridge->dev))
if (agp_bridge->mode & AGPSTAT_MODE_3_0)
agp_v3_parse_one(&requested_mode, &bridge_agpstat, &vga_agpstat);
else
agp_v2_parse_one(&requested_mode, &bridge_agpstat, &vga_agpstat);
@ -732,7 +719,7 @@ void agp_generic_enable(struct agp_bridge_data *bridge, u32 requested_mode)
/* Do AGP version specific frobbing. */
if (bridge->major_version >= 3) {
if (check_bridge_mode(bridge->dev)) {
if (bridge->mode & AGPSTAT_MODE_3_0) {
/* If we have 3.5, we can do the isoch stuff. */
if (bridge->minor_version >= 5)
agp_3_5_enable(bridge);
@ -806,8 +793,7 @@ int agp_generic_create_gatt_table(struct agp_bridge_data *bridge)
break;
}
table = (char *) __get_free_pages(GFP_KERNEL,
page_order);
table = alloc_gatt_pages(page_order);
if (table == NULL) {
i++;
@ -838,7 +824,7 @@ int agp_generic_create_gatt_table(struct agp_bridge_data *bridge)
size = ((struct aper_size_info_fixed *) temp)->size;
page_order = ((struct aper_size_info_fixed *) temp)->page_order;
num_entries = ((struct aper_size_info_fixed *) temp)->num_entries;
table = (char *) __get_free_pages(GFP_KERNEL, page_order);
table = alloc_gatt_pages(page_order);
}
if (table == NULL)
@ -853,7 +839,7 @@ int agp_generic_create_gatt_table(struct agp_bridge_data *bridge)
agp_gatt_table = (void *)table;
bridge->driver->cache_flush();
bridge->gatt_table = ioremap_nocache(virt_to_phys(table),
bridge->gatt_table = ioremap_nocache(virt_to_gart(table),
(PAGE_SIZE * (1 << page_order)));
bridge->driver->cache_flush();
@ -861,11 +847,11 @@ int agp_generic_create_gatt_table(struct agp_bridge_data *bridge)
for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
ClearPageReserved(page);
free_pages((unsigned long) table, page_order);
free_gatt_pages(table, page_order);
return -ENOMEM;
}
bridge->gatt_bus_addr = virt_to_phys(bridge->gatt_table_real);
bridge->gatt_bus_addr = virt_to_gart(bridge->gatt_table_real);
/* AK: bogus, should encode addresses > 4GB */
for (i = 0; i < num_entries; i++) {
@ -919,7 +905,7 @@ int agp_generic_free_gatt_table(struct agp_bridge_data *bridge)
for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
ClearPageReserved(page);
free_pages((unsigned long) bridge->gatt_table_real, page_order);
free_gatt_pages(bridge->gatt_table_real, page_order);
agp_gatt_table = NULL;
bridge->gatt_table = NULL;

View file

@ -110,7 +110,7 @@ static int __init hp_zx1_ioc_shared(void)
hp->gart_size = HP_ZX1_GART_SIZE;
hp->gatt_entries = hp->gart_size / hp->io_page_size;
hp->io_pdir = phys_to_virt(readq(hp->ioc_regs+HP_ZX1_PDIR_BASE));
hp->io_pdir = gart_to_virt(readq(hp->ioc_regs+HP_ZX1_PDIR_BASE));
hp->gatt = &hp->io_pdir[HP_ZX1_IOVA_TO_PDIR(hp->gart_base)];
if (hp->gatt[0] != HP_ZX1_SBA_IOMMU_COOKIE) {
@ -248,7 +248,7 @@ hp_zx1_configure (void)
agp_bridge->mode = readl(hp->lba_regs+hp->lba_cap_offset+PCI_AGP_STATUS);
if (hp->io_pdir_owner) {
writel(virt_to_phys(hp->io_pdir), hp->ioc_regs+HP_ZX1_PDIR_BASE);
writel(virt_to_gart(hp->io_pdir), hp->ioc_regs+HP_ZX1_PDIR_BASE);
readl(hp->ioc_regs+HP_ZX1_PDIR_BASE);
writel(hp->io_tlb_ps, hp->ioc_regs+HP_ZX1_TCNFG);
readl(hp->ioc_regs+HP_ZX1_TCNFG);

View file

@ -372,7 +372,7 @@ static int i460_alloc_large_page (struct lp_desc *lp)
}
memset(lp->alloced_map, 0, map_size);
lp->paddr = virt_to_phys(lpage);
lp->paddr = virt_to_gart(lpage);
lp->refcount = 0;
atomic_add(I460_KPAGES_PER_IOPAGE, &agp_bridge->current_memory_agp);
return 0;
@ -383,7 +383,7 @@ static void i460_free_large_page (struct lp_desc *lp)
kfree(lp->alloced_map);
lp->alloced_map = NULL;
free_pages((unsigned long) phys_to_virt(lp->paddr), I460_IO_PAGE_SHIFT - PAGE_SHIFT);
free_pages((unsigned long) gart_to_virt(lp->paddr), I460_IO_PAGE_SHIFT - PAGE_SHIFT);
atomic_sub(I460_KPAGES_PER_IOPAGE, &agp_bridge->current_memory_agp);
}

View file

@ -286,7 +286,7 @@ static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type)
if (new == NULL)
return NULL;
new->memory[0] = virt_to_phys(addr);
new->memory[0] = virt_to_gart(addr);
if (pg_count == 4) {
/* kludge to get 4 physical pages for ARGB cursor */
new->memory[1] = new->memory[0] + PAGE_SIZE;
@ -329,10 +329,10 @@ static void intel_i810_free_by_type(struct agp_memory *curr)
agp_free_key(curr->key);
if(curr->type == AGP_PHYS_MEMORY) {
if (curr->page_count == 4)
i8xx_destroy_pages(phys_to_virt(curr->memory[0]));
i8xx_destroy_pages(gart_to_virt(curr->memory[0]));
else
agp_bridge->driver->agp_destroy_page(
phys_to_virt(curr->memory[0]));
gart_to_virt(curr->memory[0]));
vfree(curr->memory);
}
kfree(curr);
@ -418,7 +418,8 @@ static void intel_i830_init_gtt_entries(void)
case I915_GMCH_GMS_STOLEN_48M:
/* Check it's really I915G */
if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915G_HB ||
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB)
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB ||
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945G_HB)
gtt_entries = MB(48) - KB(size);
else
gtt_entries = 0;
@ -426,7 +427,8 @@ static void intel_i830_init_gtt_entries(void)
case I915_GMCH_GMS_STOLEN_64M:
/* Check it's really I915G */
if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915G_HB ||
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB)
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB ||
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945G_HB)
gtt_entries = MB(64) - KB(size);
else
gtt_entries = 0;
@ -1662,6 +1664,14 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
}
name = "915GM";
break;
case PCI_DEVICE_ID_INTEL_82945G_HB:
if (find_i830(PCI_DEVICE_ID_INTEL_82945G_IG)) {
bridge->driver = &intel_915_driver;
} else {
bridge->driver = &intel_845_driver;
}
name = "945G";
break;
case PCI_DEVICE_ID_INTEL_7505_0:
bridge->driver = &intel_7505_driver;
name = "E7505";
@ -1801,6 +1811,7 @@ static struct pci_device_id agp_intel_pci_table[] = {
ID(PCI_DEVICE_ID_INTEL_7205_0),
ID(PCI_DEVICE_ID_INTEL_82915G_HB),
ID(PCI_DEVICE_ID_INTEL_82915GM_HB),
ID(PCI_DEVICE_ID_INTEL_82945G_HB),
{ }
};

View file

@ -133,11 +133,14 @@ static int sgi_tioca_insert_memory(struct agp_memory *mem, off_t pg_start,
off_t j;
void *temp;
struct agp_bridge_data *bridge;
u64 *table;
bridge = mem->bridge;
if (!bridge)
return -EINVAL;
table = (u64 *)bridge->gatt_table;
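	/* Index the GATT as 64-bit entries rather than through the
	 * generic 32-bit gatt_table pointer, since TIO CA GART entries
	 * are 64 bits wide.
	 */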
temp = bridge->current_size;
switch (bridge->driver->size_type) {
@ -175,7 +178,7 @@ static int sgi_tioca_insert_memory(struct agp_memory *mem, off_t pg_start,
j = pg_start;
while (j < (pg_start + mem->page_count)) {
if (*(bridge->gatt_table + j))
if (table[j])
return -EBUSY;
j++;
}
@ -186,7 +189,7 @@ static int sgi_tioca_insert_memory(struct agp_memory *mem, off_t pg_start,
}
for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
*(bridge->gatt_table + j) =
table[j] =
bridge->driver->mask_memory(bridge, mem->memory[i],
mem->type);
}
@ -200,6 +203,7 @@ static int sgi_tioca_remove_memory(struct agp_memory *mem, off_t pg_start,
{
size_t i;
struct agp_bridge_data *bridge;
u64 *table;
bridge = mem->bridge;
if (!bridge)
@ -209,8 +213,10 @@ static int sgi_tioca_remove_memory(struct agp_memory *mem, off_t pg_start,
return -EINVAL;
}
table = (u64 *)bridge->gatt_table;
for (i = pg_start; i < (mem->page_count + pg_start); i++) {
*(bridge->gatt_table + i) = 0;
table[i] = 0;
}
bridge->driver->tlb_flush(mem);

View file

@ -51,7 +51,7 @@ static int serverworks_create_page_map(struct serverworks_page_map *page_map)
}
SetPageReserved(virt_to_page(page_map->real));
global_cache_flush();
page_map->remapped = ioremap_nocache(virt_to_phys(page_map->real),
page_map->remapped = ioremap_nocache(virt_to_gart(page_map->real),
PAGE_SIZE);
if (page_map->remapped == NULL) {
ClearPageReserved(virt_to_page(page_map->real));
@ -162,7 +162,7 @@ static int serverworks_create_gatt_table(struct agp_bridge_data *bridge)
/* Create a fake scratch directory */
for(i = 0; i < 1024; i++) {
writel(agp_bridge->scratch_page, serverworks_private.scratch_dir.remapped+i);
writel(virt_to_phys(serverworks_private.scratch_dir.real) | 1, page_dir.remapped+i);
writel(virt_to_gart(serverworks_private.scratch_dir.real) | 1, page_dir.remapped+i);
}
retval = serverworks_create_gatt_pages(value->num_entries / 1024);
@ -174,7 +174,7 @@ static int serverworks_create_gatt_table(struct agp_bridge_data *bridge)
agp_bridge->gatt_table_real = (u32 *)page_dir.real;
agp_bridge->gatt_table = (u32 __iomem *)page_dir.remapped;
agp_bridge->gatt_bus_addr = virt_to_phys(page_dir.real);
agp_bridge->gatt_bus_addr = virt_to_gart(page_dir.real);
/* Get the address for the gart region.
* This is a bus address even on the alpha, b/c its
@ -187,7 +187,7 @@ static int serverworks_create_gatt_table(struct agp_bridge_data *bridge)
/* Calculate the agp offset */
for(i = 0; i < value->num_entries / 1024; i++)
writel(virt_to_phys(serverworks_private.gatt_pages[i]->real)|1, page_dir.remapped+i);
writel(virt_to_gart(serverworks_private.gatt_pages[i]->real)|1, page_dir.remapped+i);
return 0;
}

View file

@ -407,7 +407,7 @@ static int uninorth_create_gatt_table(struct agp_bridge_data *bridge)
bridge->gatt_table_real = (u32 *) table;
bridge->gatt_table = (u32 *)table;
bridge->gatt_bus_addr = virt_to_phys(table);
bridge->gatt_bus_addr = virt_to_gart(table);
for (i = 0; i < num_entries; i++)
bridge->gatt_table[i] = 0;

View file

@ -1995,9 +1995,6 @@ static void mxser_receive_chars(struct mxser_struct *info, int *status)
unsigned char ch, gdl;
int ignored = 0;
int cnt = 0;
unsigned char *cp;
char *fp;
int count;
int recv_room;
int max = 256;
unsigned long flags;
@ -2011,10 +2008,6 @@ static void mxser_receive_chars(struct mxser_struct *info, int *status)
//return;
}
cp = tty->flip.char_buf;
fp = tty->flip.flag_buf;
count = 0;
// following add by Victor Yu. 09-02-2002
if (info->IsMoxaMustChipFlag != MOXA_OTHER_UART) {
@ -2041,12 +2034,10 @@ static void mxser_receive_chars(struct mxser_struct *info, int *status)
}
while (gdl--) {
ch = inb(info->base + UART_RX);
count++;
*cp++ = ch;
*fp++ = 0;
tty_insert_flip_char(tty, ch, 0);
cnt++;
/*
if((count>=HI_WATER) && (info->stop_rx==0)){
if((cnt>=HI_WATER) && (info->stop_rx==0)){
mxser_stoprx(tty);
info->stop_rx=1;
break;
@ -2061,7 +2052,7 @@ intr_old:
if (max-- < 0)
break;
/*
if((count>=HI_WATER) && (info->stop_rx==0)){
if((cnt>=HI_WATER) && (info->stop_rx==0)){
mxser_stoprx(tty);
info->stop_rx=1;
break;
@ -2078,36 +2069,33 @@ intr_old:
if (++ignored > 100)
break;
} else {
count++;
char flag = 0;
if (*status & UART_LSR_SPECIAL) {
if (*status & UART_LSR_BI) {
*fp++ = TTY_BREAK;
flag = TTY_BREAK;
/* added by casper 1/11/2000 */
info->icount.brk++;
/* */
if (info->flags & ASYNC_SAK)
do_SAK(tty);
} else if (*status & UART_LSR_PE) {
*fp++ = TTY_PARITY;
flag = TTY_PARITY;
/* added by casper 1/11/2000 */
info->icount.parity++;
/* */
} else if (*status & UART_LSR_FE) {
*fp++ = TTY_FRAME;
flag = TTY_FRAME;
/* added by casper 1/11/2000 */
info->icount.frame++;
/* */
} else if (*status & UART_LSR_OE) {
*fp++ = TTY_OVERRUN;
flag = TTY_OVERRUN;
/* added by casper 1/11/2000 */
info->icount.overrun++;
/* */
} else
*fp++ = 0;
} else
*fp++ = 0;
*cp++ = ch;
}
}
tty_insert_flip_char(tty, ch, flag);
cnt++;
if (cnt >= recv_room) {
if (!info->ldisc_stop_rx) {
@ -2132,13 +2120,13 @@ intr_old:
// above add by Victor Yu. 09-02-2002
} while (*status & UART_LSR_DR);
end_intr: // add by Victor Yu. 09-02-2002
end_intr: // add by Victor Yu. 09-02-2002
mxvar_log.rxcnt[info->port] += cnt;
info->mon_data.rxcnt += cnt;
info->mon_data.up_rxcnt += cnt;
spin_unlock_irqrestore(&info->slock, flags);
tty_flip_buffer_push(tty);
}

View file

@ -46,6 +46,10 @@ config CPU_FREQ_STAT_DETAILS
This will show the detailed CPU frequency translation table in the
sysfs file system.
# Note that it is not currently possible to set the other governors (such as ondemand)
# as the default, since if they fail to initialise, cpufreq will be
# left in an undefined state.
choice
prompt "Default CPUFreq governor"
default CPU_FREQ_DEFAULT_GOV_USERSPACE if CPU_FREQ_SA1100 || CPU_FREQ_SA1110
@ -115,4 +119,24 @@ config CPU_FREQ_GOV_ONDEMAND
If in doubt, say N.
config CPU_FREQ_GOV_CONSERVATIVE
tristate "'conservative' cpufreq governor"
depends on CPU_FREQ
help
'conservative' - this governor is rather similar to 'ondemand' in
both its source code and its purpose; the difference is that it is
optimised for battery-powered environments. The frequency is
increased and decreased gradually rather than jumping straight to
100% when speed is required.
If you have a desktop machine then you should really be considering
the 'ondemand' governor instead. However, if you are using a laptop,
PDA or even an AMD64-based computer (due to the unacceptable
step-by-step latency issues between the minimum and maximum frequency
transitions in the CPU), you will probably want to use this governor.
For details, take a look at linux/Documentation/cpu-freq.
If in doubt, say N.
endif # CPU_FREQ

View file

@ -8,6 +8,7 @@ obj-$(CONFIG_CPU_FREQ_GOV_PERFORMANCE) += cpufreq_performance.o
obj-$(CONFIG_CPU_FREQ_GOV_POWERSAVE) += cpufreq_powersave.o
obj-$(CONFIG_CPU_FREQ_GOV_USERSPACE) += cpufreq_userspace.o
obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND) += cpufreq_ondemand.o
obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o
# CPUfreq cross-arch helpers
obj-$(CONFIG_CPU_FREQ_TABLE) += freq_table.o

View file

@ -258,7 +258,7 @@ void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
(likely(cpufreq_cpu_data[freqs->cpu]->cur)) &&
(unlikely(freqs->old != cpufreq_cpu_data[freqs->cpu]->cur)))
{
printk(KERN_WARNING "Warning: CPU frequency is %u, "
dprintk(KERN_WARNING "Warning: CPU frequency is %u, "
"cpufreq assumed %u kHz.\n", freqs->old, cpufreq_cpu_data[freqs->cpu]->cur);
freqs->old = cpufreq_cpu_data[freqs->cpu]->cur;
}
@ -814,7 +814,7 @@ static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq, unsigne
{
struct cpufreq_freqs freqs;
printk(KERN_WARNING "Warning: CPU frequency out of sync: cpufreq and timing "
dprintk(KERN_WARNING "Warning: CPU frequency out of sync: cpufreq and timing "
"core thinks of %u, is %u kHz.\n", old_freq, new_freq);
freqs.cpu = cpu;
@ -923,7 +923,7 @@ static int cpufreq_suspend(struct sys_device * sysdev, u32 state)
struct cpufreq_freqs freqs;
if (!(cpufreq_driver->flags & CPUFREQ_PM_NO_WARN))
printk(KERN_DEBUG "Warning: CPU frequency is %u, "
dprintk(KERN_DEBUG "Warning: CPU frequency is %u, "
"cpufreq assumed %u kHz.\n",
cur_freq, cpu_policy->cur);
@ -1004,7 +1004,7 @@ static int cpufreq_resume(struct sys_device * sysdev)
struct cpufreq_freqs freqs;
if (!(cpufreq_driver->flags & CPUFREQ_PM_NO_WARN))
printk(KERN_WARNING "Warning: CPU frequency"
dprintk(KERN_WARNING "Warning: CPU frequency"
"is %u, cpufreq assumed %u kHz.\n",
cur_freq, cpu_policy->cur);

View file

@ -0,0 +1,586 @@
/*
* drivers/cpufreq/cpufreq_conservative.c
*
* Copyright (C) 2001 Russell King
* (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
* Jun Nakajima <jun.nakajima@intel.com>
* (C) 2004 Alexander Clouter <alex-kernel@digriz.org.uk>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ctype.h>
#include <linux/cpufreq.h>
#include <linux/sysctl.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/sysfs.h>
#include <linux/sched.h>
#include <linux/kmod.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/kernel_stat.h>
#include <linux/percpu.h>
/*
* dbs is used in this file as shorthand for demand-based switching.
* It helps to keep variable names smaller and simpler.
*/
#define DEF_FREQUENCY_UP_THRESHOLD (80)
#define MIN_FREQUENCY_UP_THRESHOLD (0)
#define MAX_FREQUENCY_UP_THRESHOLD (100)
#define DEF_FREQUENCY_DOWN_THRESHOLD (20)
#define MIN_FREQUENCY_DOWN_THRESHOLD (0)
#define MAX_FREQUENCY_DOWN_THRESHOLD (100)
/*
* The polling frequency of this governor depends on the capability of
* the processor. Default polling frequency is 1000 times the transition
* latency of the processor. The governor will work on any processor with
* transition latency <= 10 ms, using an appropriate sampling rate.
* For CPUs with transition latency > 10 ms (mostly drivers with
* CPUFREQ_ETERNAL) this governor will not work.
* All times here are in microseconds (us).
*/
static unsigned int def_sampling_rate;
#define MIN_SAMPLING_RATE (def_sampling_rate / 2)
#define MAX_SAMPLING_RATE (500 * def_sampling_rate)
#define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER (100000)
#define DEF_SAMPLING_DOWN_FACTOR (5)
#define TRANSITION_LATENCY_LIMIT (10 * 1000)
static void do_dbs_timer(void *data);
struct cpu_dbs_info_s {
struct cpufreq_policy *cur_policy;
unsigned int prev_cpu_idle_up;
unsigned int prev_cpu_idle_down;
unsigned int enable;
};
static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
static unsigned int dbs_enable; /* number of CPUs using this policy */
static DECLARE_MUTEX (dbs_sem);
static DECLARE_WORK (dbs_work, do_dbs_timer, NULL);
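/* dbs_sem serializes updates to the tuners below; dbs_work is the
 * deferred sampling routine shared by every CPU using this governor.
 */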
struct dbs_tuners {
unsigned int sampling_rate;
unsigned int sampling_down_factor;
unsigned int up_threshold;
unsigned int down_threshold;
unsigned int ignore_nice;
unsigned int freq_step;
};
static struct dbs_tuners dbs_tuners_ins = {
.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
.down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD,
.sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
};
static inline unsigned int get_cpu_idle_time(unsigned int cpu)
{
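	/* Idle time is idle + iowait ticks; nice ticks also count as
	 * idle unless ignore_nice is set.
	 */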
return kstat_cpu(cpu).cpustat.idle +
kstat_cpu(cpu).cpustat.iowait +
( !dbs_tuners_ins.ignore_nice ?
kstat_cpu(cpu).cpustat.nice :
0);
}
/************************** sysfs interface ************************/
static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf)
{
return sprintf (buf, "%u\n", MAX_SAMPLING_RATE);
}
static ssize_t show_sampling_rate_min(struct cpufreq_policy *policy, char *buf)
{
return sprintf (buf, "%u\n", MIN_SAMPLING_RATE);
}
#define define_one_ro(_name) \
static struct freq_attr _name = \
__ATTR(_name, 0444, show_##_name, NULL)
define_one_ro(sampling_rate_max);
define_one_ro(sampling_rate_min);
/* cpufreq_conservative Governor Tunables */
#define show_one(file_name, object) \
static ssize_t show_##file_name \
(struct cpufreq_policy *unused, char *buf) \
{ \
return sprintf(buf, "%u\n", dbs_tuners_ins.object); \
}
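/*
* For reference, the first invocation below, show_one(sampling_rate,
* sampling_rate), expands (modulo whitespace) to:
*
*   static ssize_t show_sampling_rate(struct cpufreq_policy *unused,
*                                     char *buf)
*   {
*       return sprintf(buf, "%u\n", dbs_tuners_ins.sampling_rate);
*   }
*/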
show_one(sampling_rate, sampling_rate);
show_one(sampling_down_factor, sampling_down_factor);
show_one(up_threshold, up_threshold);
show_one(down_threshold, down_threshold);
show_one(ignore_nice, ignore_nice);
show_one(freq_step, freq_step);
static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
const char *buf, size_t count)
{
unsigned int input;
int ret;
ret = sscanf (buf, "%u", &input);
if (ret != 1 )
return -EINVAL;
down(&dbs_sem);
dbs_tuners_ins.sampling_down_factor = input;
up(&dbs_sem);
return count;
}
static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
const char *buf, size_t count)
{
unsigned int input;
int ret;
ret = sscanf (buf, "%u", &input);
down(&dbs_sem);
if (ret != 1 || input > MAX_SAMPLING_RATE || input < MIN_SAMPLING_RATE) {
up(&dbs_sem);
return -EINVAL;
}
dbs_tuners_ins.sampling_rate = input;
up(&dbs_sem);
return count;
}
static ssize_t store_up_threshold(struct cpufreq_policy *unused,
const char *buf, size_t count)
{
unsigned int input;
int ret;
ret = sscanf (buf, "%u", &input);
down(&dbs_sem);
if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
input < MIN_FREQUENCY_UP_THRESHOLD ||
input <= dbs_tuners_ins.down_threshold) {
up(&dbs_sem);
return -EINVAL;
}
dbs_tuners_ins.up_threshold = input;
up(&dbs_sem);
return count;
}
static ssize_t store_down_threshold(struct cpufreq_policy *unused,
const char *buf, size_t count)
{
unsigned int input;
int ret;
ret = sscanf (buf, "%u", &input);
down(&dbs_sem);
if (ret != 1 || input > MAX_FREQUENCY_DOWN_THRESHOLD ||
input < MIN_FREQUENCY_DOWN_THRESHOLD ||
input >= dbs_tuners_ins.up_threshold) {
up(&dbs_sem);
return -EINVAL;
}
dbs_tuners_ins.down_threshold = input;
up(&dbs_sem);
return count;
}
static ssize_t store_ignore_nice(struct cpufreq_policy *policy,
const char *buf, size_t count)
{
unsigned int input;
int ret;
unsigned int j;
ret = sscanf (buf, "%u", &input);
if (ret != 1)
return -EINVAL;
if (input > 1)
input = 1;
down(&dbs_sem);
if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
up(&dbs_sem);
return count;
}
dbs_tuners_ins.ignore_nice = input;
/* we need to re-evaluate prev_cpu_idle_up and prev_cpu_idle_down */
for_each_online_cpu(j) {
struct cpu_dbs_info_s *j_dbs_info;
j_dbs_info = &per_cpu(cpu_dbs_info, j);
j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j);
j_dbs_info->prev_cpu_idle_down = j_dbs_info->prev_cpu_idle_up;
}
up(&dbs_sem);
return count;
}
static ssize_t store_freq_step(struct cpufreq_policy *policy,
const char *buf, size_t count)
{
unsigned int input;
int ret;
ret = sscanf (buf, "%u", &input);
if (ret != 1)
return -EINVAL;
if (input > 100)
input = 100;
/* no need to test here if freq_step is zero, as the user might actually
* want this; they would be crazy though :) */
down(&dbs_sem);
dbs_tuners_ins.freq_step = input;
up(&dbs_sem);
return count;
}
#define define_one_rw(_name) \
static struct freq_attr _name = \
__ATTR(_name, 0644, show_##_name, store_##_name)
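/*
* For reference, define_one_rw(sampling_rate) below expands to:
*   static struct freq_attr sampling_rate =
*   __ATTR(sampling_rate, 0644, show_sampling_rate, store_sampling_rate);
*/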
define_one_rw(sampling_rate);
define_one_rw(sampling_down_factor);
define_one_rw(up_threshold);
define_one_rw(down_threshold);
define_one_rw(ignore_nice);
define_one_rw(freq_step);
static struct attribute * dbs_attributes[] = {
&sampling_rate_max.attr,
&sampling_rate_min.attr,
&sampling_rate.attr,
&sampling_down_factor.attr,
&up_threshold.attr,
&down_threshold.attr,
&ignore_nice.attr,
&freq_step.attr,
NULL
};
static struct attribute_group dbs_attr_group = {
.attrs = dbs_attributes,
.name = "conservative",
};
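/*
* With the usual cpufreq sysfs layout (path shown for orientation, not
* taken from this file), these tunables surface as files such as
*   /sys/devices/system/cpu/cpu0/cpufreq/conservative/sampling_rate
*   /sys/devices/system/cpu/cpu0/cpufreq/conservative/up_threshold
*/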
/************************** sysfs end ************************/
static void dbs_check_cpu(int cpu)
{
unsigned int idle_ticks, up_idle_ticks, down_idle_ticks;
unsigned int freq_step;
unsigned int freq_down_sampling_rate;
static int down_skip[NR_CPUS];
static int requested_freq[NR_CPUS];
static unsigned short init_flag = 0;
struct cpu_dbs_info_s *this_dbs_info;
struct cpu_dbs_info_s *dbs_info;
struct cpufreq_policy *policy;
unsigned int j;
this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
if (!this_dbs_info->enable)
return;
policy = this_dbs_info->cur_policy;
if (init_flag == 0) {
for (/* NULL */; init_flag < NR_CPUS; init_flag++) {
dbs_info = &per_cpu(cpu_dbs_info, init_flag);
requested_freq[init_flag] = dbs_info->cur_policy->cur;
}
init_flag = 1;
}
/*
* The default safe range is 20% to 80%
* Every sampling_rate, we check
* - If current idle time is less than 20%, then we try to
* increase frequency
* Every sampling_rate*sampling_down_factor, we check
* - If current idle time is more than 80%, then we try to
* decrease frequency
*
* Frequency increases and decreases both happen in minimum
* steps of 5% (default) of the maximum frequency.
*/
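/*
* Illustrative numbers for the check below (rates hypothetical):
* sampling_rate = 100,000 us at HZ = 1000 is ~100 jiffies per sample;
* up_threshold = 80 makes up_idle_ticks = (100 - 80) * 100 = 2000.
* A CPU idle for only 15 jiffies scores idle_ticks = 15 * 100 = 1500,
* and 1500 < 2000, so the frequency is stepped up by freq_step.
*/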
/* Check for frequency increase */
idle_ticks = UINT_MAX;
for_each_cpu_mask(j, policy->cpus) {
unsigned int tmp_idle_ticks, total_idle_ticks;
struct cpu_dbs_info_s *j_dbs_info;
j_dbs_info = &per_cpu(cpu_dbs_info, j);
/* Check for frequency increase */
total_idle_ticks = get_cpu_idle_time(j);
tmp_idle_ticks = total_idle_ticks -
j_dbs_info->prev_cpu_idle_up;
j_dbs_info->prev_cpu_idle_up = total_idle_ticks;
if (tmp_idle_ticks < idle_ticks)
idle_ticks = tmp_idle_ticks;
}
/* Scale idle ticks by 100 and compare with up and down ticks */
idle_ticks *= 100;
up_idle_ticks = (100 - dbs_tuners_ins.up_threshold) *
usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
if (idle_ticks < up_idle_ticks) {
down_skip[cpu] = 0;
for_each_cpu_mask(j, policy->cpus) {
struct cpu_dbs_info_s *j_dbs_info;
j_dbs_info = &per_cpu(cpu_dbs_info, j);
j_dbs_info->prev_cpu_idle_down =
j_dbs_info->prev_cpu_idle_up;
}
/* if we are already at full speed then break out early */
if (requested_freq[cpu] == policy->max)
return;
freq_step = (dbs_tuners_ins.freq_step * policy->max) / 100;
/* max freq cannot be less than 100. But who knows.... */
if (unlikely(freq_step == 0))
freq_step = 5;
requested_freq[cpu] += freq_step;
if (requested_freq[cpu] > policy->max)
requested_freq[cpu] = policy->max;
__cpufreq_driver_target(policy, requested_freq[cpu],
CPUFREQ_RELATION_H);
return;
}
/* Check for frequency decrease */
down_skip[cpu]++;
if (down_skip[cpu] < dbs_tuners_ins.sampling_down_factor)
return;
idle_ticks = UINT_MAX;
for_each_cpu_mask(j, policy->cpus) {
unsigned int tmp_idle_ticks, total_idle_ticks;
struct cpu_dbs_info_s *j_dbs_info;
j_dbs_info = &per_cpu(cpu_dbs_info, j);
total_idle_ticks = j_dbs_info->prev_cpu_idle_up;
tmp_idle_ticks = total_idle_ticks -
j_dbs_info->prev_cpu_idle_down;
j_dbs_info->prev_cpu_idle_down = total_idle_ticks;
if (tmp_idle_ticks < idle_ticks)
idle_ticks = tmp_idle_ticks;
}
/* Scale idle ticks by 100 and compare with up and down ticks */
idle_ticks *= 100;
down_skip[cpu] = 0;
freq_down_sampling_rate = dbs_tuners_ins.sampling_rate *
dbs_tuners_ins.sampling_down_factor;
down_idle_ticks = (100 - dbs_tuners_ins.down_threshold) *
usecs_to_jiffies(freq_down_sampling_rate);
if (idle_ticks > down_idle_ticks) {
/* if we are already at the lowest speed then break out early
* or if we 'cannot' reduce the speed as the user might want
* freq_step to be zero */
if (requested_freq[cpu] == policy->min
|| dbs_tuners_ins.freq_step == 0)
return;
freq_step = (dbs_tuners_ins.freq_step * policy->max) / 100;
/* max freq cannot be less than 100. But who knows.... */
if (unlikely(freq_step == 0))
freq_step = 5;
requested_freq[cpu] -= freq_step;
if (requested_freq[cpu] < policy->min)
requested_freq[cpu] = policy->min;
__cpufreq_driver_target(policy,
requested_freq[cpu],
CPUFREQ_RELATION_H);
return;
}
}
static void do_dbs_timer(void *data)
{
int i;
down(&dbs_sem);
for_each_online_cpu(i)
dbs_check_cpu(i);
schedule_delayed_work(&dbs_work,
usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
up(&dbs_sem);
}
static inline void dbs_timer_init(void)
{
INIT_WORK(&dbs_work, do_dbs_timer, NULL);
schedule_delayed_work(&dbs_work,
usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
return;
}
static inline void dbs_timer_exit(void)
{
cancel_delayed_work(&dbs_work);
return;
}
static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
unsigned int event)
{
unsigned int cpu = policy->cpu;
struct cpu_dbs_info_s *this_dbs_info;
unsigned int j;
this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
switch (event) {
case CPUFREQ_GOV_START:
if ((!cpu_online(cpu)) ||
(!policy->cur))
return -EINVAL;
if (policy->cpuinfo.transition_latency >
(TRANSITION_LATENCY_LIMIT * 1000))
return -EINVAL;
if (this_dbs_info->enable) /* Already enabled */
break;
down(&dbs_sem);
for_each_cpu_mask(j, policy->cpus) {
struct cpu_dbs_info_s *j_dbs_info;
j_dbs_info = &per_cpu(cpu_dbs_info, j);
j_dbs_info->cur_policy = policy;
j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j);
j_dbs_info->prev_cpu_idle_down
= j_dbs_info->prev_cpu_idle_up;
}
this_dbs_info->enable = 1;
sysfs_create_group(&policy->kobj, &dbs_attr_group);
dbs_enable++;
/*
* Start the timer-scheduled work when this governor
* is used for the first time
*/
if (dbs_enable == 1) {
unsigned int latency;
/* policy latency is in nS. Convert it to uS first */
latency = policy->cpuinfo.transition_latency;
if (latency < 1000)
latency = 1000;
def_sampling_rate = (latency / 1000) *
DEF_SAMPLING_RATE_LATENCY_MULTIPLIER;
dbs_tuners_ins.sampling_rate = def_sampling_rate;
dbs_tuners_ins.ignore_nice = 0;
dbs_tuners_ins.freq_step = 5;
dbs_timer_init();
}
up(&dbs_sem);
break;
case CPUFREQ_GOV_STOP:
down(&dbs_sem);
this_dbs_info->enable = 0;
sysfs_remove_group(&policy->kobj, &dbs_attr_group);
dbs_enable--;
/*
* Stop the timer-scheduled work when no CPU is
* using this governor any longer
*/
if (dbs_enable == 0)
dbs_timer_exit();
up(&dbs_sem);
break;
case CPUFREQ_GOV_LIMITS:
down(&dbs_sem);
if (policy->max < this_dbs_info->cur_policy->cur)
__cpufreq_driver_target(
this_dbs_info->cur_policy,
policy->max, CPUFREQ_RELATION_H);
else if (policy->min > this_dbs_info->cur_policy->cur)
__cpufreq_driver_target(
this_dbs_info->cur_policy,
policy->min, CPUFREQ_RELATION_L);
up(&dbs_sem);
break;
}
return 0;
}
static struct cpufreq_governor cpufreq_gov_dbs = {
.name = "conservative",
.governor = cpufreq_governor_dbs,
.owner = THIS_MODULE,
};
static int __init cpufreq_gov_dbs_init(void)
{
return cpufreq_register_governor(&cpufreq_gov_dbs);
}
static void __exit cpufreq_gov_dbs_exit(void)
{
/* Make sure that the scheduled work is indeed not running */
flush_scheduled_work();
cpufreq_unregister_governor(&cpufreq_gov_dbs);
}
MODULE_AUTHOR ("Alexander Clouter <alex-kernel@digriz.org.uk>");
MODULE_DESCRIPTION ("'cpufreq_conservative' - A dynamic cpufreq governor for "
"Low Latency Frequency Transition capable processors "
"optimised for use in a battery environment");
MODULE_LICENSE ("GPL");
module_init(cpufreq_gov_dbs_init);
module_exit(cpufreq_gov_dbs_exit);

View file

@ -34,13 +34,9 @@
*/
#define DEF_FREQUENCY_UP_THRESHOLD (80)
#define MIN_FREQUENCY_UP_THRESHOLD (0)
#define MIN_FREQUENCY_UP_THRESHOLD (11)
#define MAX_FREQUENCY_UP_THRESHOLD (100)
#define DEF_FREQUENCY_DOWN_THRESHOLD (20)
#define MIN_FREQUENCY_DOWN_THRESHOLD (0)
#define MAX_FREQUENCY_DOWN_THRESHOLD (100)
/*
* The polling frequency of this governor depends on the capability of
* the processor. Default polling frequency is 1000 times the transition
@ -55,9 +51,9 @@ static unsigned int def_sampling_rate;
#define MIN_SAMPLING_RATE (def_sampling_rate / 2)
#define MAX_SAMPLING_RATE (500 * def_sampling_rate)
#define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER (1000)
#define DEF_SAMPLING_DOWN_FACTOR (10)
#define DEF_SAMPLING_DOWN_FACTOR (1)
#define MAX_SAMPLING_DOWN_FACTOR (10)
#define TRANSITION_LATENCY_LIMIT (10 * 1000)
#define sampling_rate_in_HZ(x) (((x * HZ) < (1000 * 1000))?1:((x * HZ) / (1000 * 1000)))
static void do_dbs_timer(void *data);
@ -78,15 +74,23 @@ struct dbs_tuners {
unsigned int sampling_rate;
unsigned int sampling_down_factor;
unsigned int up_threshold;
unsigned int down_threshold;
unsigned int ignore_nice;
};
static struct dbs_tuners dbs_tuners_ins = {
.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
.down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD,
.sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
};
static inline unsigned int get_cpu_idle_time(unsigned int cpu)
{
return kstat_cpu(cpu).cpustat.idle +
kstat_cpu(cpu).cpustat.iowait +
( !dbs_tuners_ins.ignore_nice ?
kstat_cpu(cpu).cpustat.nice :
0);
}
/************************** sysfs interface ************************/
static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf)
{
@ -115,7 +119,7 @@ static ssize_t show_##file_name \
show_one(sampling_rate, sampling_rate);
show_one(sampling_down_factor, sampling_down_factor);
show_one(up_threshold, up_threshold);
show_one(down_threshold, down_threshold);
show_one(ignore_nice, ignore_nice);
static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
const char *buf, size_t count)
@ -126,6 +130,9 @@ static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
if (ret != 1 )
return -EINVAL;
if (input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
return -EINVAL;
down(&dbs_sem);
dbs_tuners_ins.sampling_down_factor = input;
up(&dbs_sem);
@ -161,8 +168,7 @@ static ssize_t store_up_threshold(struct cpufreq_policy *unused,
down(&dbs_sem);
if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
input < MIN_FREQUENCY_UP_THRESHOLD ||
input <= dbs_tuners_ins.down_threshold) {
input < MIN_FREQUENCY_UP_THRESHOLD) {
up(&dbs_sem);
return -EINVAL;
}
@ -173,22 +179,35 @@ static ssize_t store_up_threshold(struct cpufreq_policy *unused,
return count;
}
static ssize_t store_down_threshold(struct cpufreq_policy *unused,
static ssize_t store_ignore_nice(struct cpufreq_policy *policy,
const char *buf, size_t count)
{
unsigned int input;
int ret;
unsigned int j;
ret = sscanf (buf, "%u", &input);
down(&dbs_sem);
if (ret != 1 || input > MAX_FREQUENCY_DOWN_THRESHOLD ||
input < MIN_FREQUENCY_DOWN_THRESHOLD ||
input >= dbs_tuners_ins.up_threshold) {
up(&dbs_sem);
if (ret != 1)
return -EINVAL;
}
dbs_tuners_ins.down_threshold = input;
if (input > 1)
input = 1;
down(&dbs_sem);
if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
up(&dbs_sem);
return count;
}
dbs_tuners_ins.ignore_nice = input;
/* we need to re-evaluate prev_cpu_idle_up and prev_cpu_idle_down */
for_each_online_cpu(j) {
struct cpu_dbs_info_s *j_dbs_info;
j_dbs_info = &per_cpu(cpu_dbs_info, j);
j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j);
j_dbs_info->prev_cpu_idle_down = j_dbs_info->prev_cpu_idle_up;
}
up(&dbs_sem);
return count;
@ -201,7 +220,7 @@ __ATTR(_name, 0644, show_##_name, store_##_name)
define_one_rw(sampling_rate);
define_one_rw(sampling_down_factor);
define_one_rw(up_threshold);
define_one_rw(down_threshold);
define_one_rw(ignore_nice);
static struct attribute * dbs_attributes[] = {
&sampling_rate_max.attr,
@ -209,7 +228,7 @@ static struct attribute * dbs_attributes[] = {
&sampling_rate.attr,
&sampling_down_factor.attr,
&up_threshold.attr,
&down_threshold.attr,
&ignore_nice.attr,
NULL
};
@ -222,9 +241,8 @@ static struct attribute_group dbs_attr_group = {
static void dbs_check_cpu(int cpu)
{
unsigned int idle_ticks, up_idle_ticks, down_idle_ticks;
unsigned int total_idle_ticks;
unsigned int freq_down_step;
unsigned int idle_ticks, up_idle_ticks, total_ticks;
unsigned int freq_next;
unsigned int freq_down_sampling_rate;
static int down_skip[NR_CPUS];
struct cpu_dbs_info_s *this_dbs_info;
@ -238,38 +256,25 @@ static void dbs_check_cpu(int cpu)
policy = this_dbs_info->cur_policy;
/*
* The default safe range is 20% to 80%
* Every sampling_rate, we check
* - If current idle time is less than 20%, then we try to
* increase frequency
* Every sampling_rate*sampling_down_factor, we check
* - If current idle time is more than 80%, then we try to
* decrease frequency
* Every sampling_rate, we check, if current idle time is less
* than 20% (default), then we try to increase frequency
* Every sampling_rate*sampling_down_factor, we look for the lowest
* frequency which can sustain the load while keeping idle time over
* 30%. If such a frequency exists, we try to decrease to this frequency.
*
* Any frequency increase takes it to the maximum frequency.
* Frequency reduction happens at minimum steps of
* 5% of max_frequency
* 5% (default) of current frequency
*/
/* Check for frequency increase */
total_idle_ticks = kstat_cpu(cpu).cpustat.idle +
kstat_cpu(cpu).cpustat.iowait;
idle_ticks = total_idle_ticks -
this_dbs_info->prev_cpu_idle_up;
this_dbs_info->prev_cpu_idle_up = total_idle_ticks;
idle_ticks = UINT_MAX;
for_each_cpu_mask(j, policy->cpus) {
unsigned int tmp_idle_ticks;
unsigned int tmp_idle_ticks, total_idle_ticks;
struct cpu_dbs_info_s *j_dbs_info;
if (j == cpu)
continue;
j_dbs_info = &per_cpu(cpu_dbs_info, j);
/* Check for frequency increase */
total_idle_ticks = kstat_cpu(j).cpustat.idle +
kstat_cpu(j).cpustat.iowait;
total_idle_ticks = get_cpu_idle_time(j);
tmp_idle_ticks = total_idle_ticks -
j_dbs_info->prev_cpu_idle_up;
j_dbs_info->prev_cpu_idle_up = total_idle_ticks;
@ -281,13 +286,23 @@ static void dbs_check_cpu(int cpu)
/* Scale idle ticks by 100 and compare with up and down ticks */
idle_ticks *= 100;
up_idle_ticks = (100 - dbs_tuners_ins.up_threshold) *
sampling_rate_in_HZ(dbs_tuners_ins.sampling_rate);
usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
if (idle_ticks < up_idle_ticks) {
down_skip[cpu] = 0;
for_each_cpu_mask(j, policy->cpus) {
struct cpu_dbs_info_s *j_dbs_info;
j_dbs_info = &per_cpu(cpu_dbs_info, j);
j_dbs_info->prev_cpu_idle_down =
j_dbs_info->prev_cpu_idle_up;
}
/* if we are already at full speed then break out early */
if (policy->cur == policy->max)
return;
__cpufreq_driver_target(policy, policy->max,
CPUFREQ_RELATION_H);
down_skip[cpu] = 0;
this_dbs_info->prev_cpu_idle_down = total_idle_ticks;
return;
}
@ -296,23 +311,14 @@ static void dbs_check_cpu(int cpu)
if (down_skip[cpu] < dbs_tuners_ins.sampling_down_factor)
return;
total_idle_ticks = kstat_cpu(cpu).cpustat.idle +
kstat_cpu(cpu).cpustat.iowait;
idle_ticks = total_idle_ticks -
this_dbs_info->prev_cpu_idle_down;
this_dbs_info->prev_cpu_idle_down = total_idle_ticks;
idle_ticks = UINT_MAX;
for_each_cpu_mask(j, policy->cpus) {
unsigned int tmp_idle_ticks;
unsigned int tmp_idle_ticks, total_idle_ticks;
struct cpu_dbs_info_s *j_dbs_info;
if (j == cpu)
continue;
j_dbs_info = &per_cpu(cpu_dbs_info, j);
/* Check for frequency increase */
total_idle_ticks = kstat_cpu(j).cpustat.idle +
kstat_cpu(j).cpustat.iowait;
/* Check for frequency decrease */
total_idle_ticks = j_dbs_info->prev_cpu_idle_up;
tmp_idle_ticks = total_idle_ticks -
j_dbs_info->prev_cpu_idle_down;
j_dbs_info->prev_cpu_idle_down = total_idle_ticks;
@ -321,38 +327,37 @@ static void dbs_check_cpu(int cpu)
idle_ticks = tmp_idle_ticks;
}
/* Scale idle ticks by 100 and compare with up and down ticks */
idle_ticks *= 100;
down_skip[cpu] = 0;
/* if we cannot reduce the frequency anymore, break out early */
if (policy->cur == policy->min)
return;
/* Compute how many ticks there are between two measurements */
freq_down_sampling_rate = dbs_tuners_ins.sampling_rate *
dbs_tuners_ins.sampling_down_factor;
down_idle_ticks = (100 - dbs_tuners_ins.down_threshold) *
sampling_rate_in_HZ(freq_down_sampling_rate);
total_ticks = usecs_to_jiffies(freq_down_sampling_rate);
if (idle_ticks > down_idle_ticks ) {
freq_down_step = (5 * policy->max) / 100;
/*
* The optimal frequency is the lowest frequency that can support
* the current CPU usage without triggering the up policy. To be
* safe, we target 10 points under the threshold.
*/
freq_next = ((total_ticks - idle_ticks) * 100) / total_ticks;
freq_next = (freq_next * policy->cur) /
(dbs_tuners_ins.up_threshold - 10);
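/*
* Illustrative numbers (hypothetical): total_ticks = 100 and
* idle_ticks = 70 give a load of (100 - 70) * 100 / 100 = 30%. With
* up_threshold = 80, freq_next = 30 * policy->cur / (80 - 10), about
* 43% of the current frequency; the target below is only issued when
* freq_next undercuts the current frequency by at least 5%.
*/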
/* max freq cannot be less than 100. But who knows.... */
if (unlikely(freq_down_step == 0))
freq_down_step = 5;
__cpufreq_driver_target(policy,
policy->cur - freq_down_step,
CPUFREQ_RELATION_H);
return;
}
if (freq_next <= ((policy->cur * 95) / 100))
__cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_L);
}
static void do_dbs_timer(void *data)
{
int i;
down(&dbs_sem);
for (i = 0; i < NR_CPUS; i++)
if (cpu_online(i))
dbs_check_cpu(i);
for_each_online_cpu(i)
dbs_check_cpu(i);
schedule_delayed_work(&dbs_work,
sampling_rate_in_HZ(dbs_tuners_ins.sampling_rate));
usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
up(&dbs_sem);
}
@ -360,7 +365,7 @@ static inline void dbs_timer_init(void)
{
INIT_WORK(&dbs_work, do_dbs_timer, NULL);
schedule_delayed_work(&dbs_work,
sampling_rate_in_HZ(dbs_tuners_ins.sampling_rate));
usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
return;
}
@ -397,12 +402,9 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
j_dbs_info = &per_cpu(cpu_dbs_info, j);
j_dbs_info->cur_policy = policy;
j_dbs_info->prev_cpu_idle_up =
kstat_cpu(j).cpustat.idle +
kstat_cpu(j).cpustat.iowait;
j_dbs_info->prev_cpu_idle_down =
kstat_cpu(j).cpustat.idle +
kstat_cpu(j).cpustat.iowait;
j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j);
j_dbs_info->prev_cpu_idle_down
= j_dbs_info->prev_cpu_idle_up;
}
this_dbs_info->enable = 1;
sysfs_create_group(&policy->kobj, &dbs_attr_group);
@ -422,6 +424,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
def_sampling_rate = (latency / 1000) *
DEF_SAMPLING_RATE_LATENCY_MULTIPLIER;
dbs_tuners_ins.sampling_rate = def_sampling_rate;
dbs_tuners_ins.ignore_nice = 0;
dbs_timer_init();
}
@ -461,12 +464,11 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
return 0;
}
struct cpufreq_governor cpufreq_gov_dbs = {
static struct cpufreq_governor cpufreq_gov_dbs = {
.name = "ondemand",
.governor = cpufreq_governor_dbs,
.owner = THIS_MODULE,
};
EXPORT_SYMBOL(cpufreq_gov_dbs);
static int __init cpufreq_gov_dbs_init(void)
{

View file

@ -19,6 +19,7 @@
#include <linux/percpu.h>
#include <linux/kobject.h>
#include <linux/spinlock.h>
#include <asm/cputime.h>
static spinlock_t cpufreq_stats_lock;
@ -29,20 +30,14 @@ static struct freq_attr _attr_##_name = {\
.show = _show,\
};
static unsigned long
delta_time(unsigned long old, unsigned long new)
{
return (old > new) ? (old - new): (new + ~old + 1);
}
struct cpufreq_stats {
unsigned int cpu;
unsigned int total_trans;
unsigned long long last_time;
unsigned long long last_time;
unsigned int max_state;
unsigned int state_num;
unsigned int last_index;
unsigned long long *time_in_state;
cputime64_t *time_in_state;
unsigned int *freq_table;
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
unsigned int *trans_table;
@ -60,12 +55,16 @@ static int
cpufreq_stats_update (unsigned int cpu)
{
struct cpufreq_stats *stat;
unsigned long long cur_time;
cur_time = get_jiffies_64();
spin_lock(&cpufreq_stats_lock);
stat = cpufreq_stats_table[cpu];
if (stat->time_in_state)
stat->time_in_state[stat->last_index] +=
delta_time(stat->last_time, jiffies);
stat->last_time = jiffies;
stat->time_in_state[stat->last_index] =
cputime64_add(stat->time_in_state[stat->last_index],
cputime_sub(cur_time, stat->last_time));
stat->last_time = cur_time;
spin_unlock(&cpufreq_stats_lock);
return 0;
}
@ -90,8 +89,8 @@ show_time_in_state(struct cpufreq_policy *policy, char *buf)
return 0;
cpufreq_stats_update(stat->cpu);
for (i = 0; i < stat->state_num; i++) {
len += sprintf(buf + len, "%u %llu\n",
stat->freq_table[i], stat->time_in_state[i]);
len += sprintf(buf + len, "%u %llu\n", stat->freq_table[i],
(unsigned long long)cputime64_to_clock_t(stat->time_in_state[i]));
}
return len;
}
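/*
* The resulting sysfs file holds one "<frequency in kHz> <time in
* USER_HZ ticks>" line per state, e.g. (made-up values):
*   2000000 153264
*   1000000  38411
*/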
@ -107,16 +106,30 @@ show_trans_table(struct cpufreq_policy *policy, char *buf)
if(!stat)
return 0;
cpufreq_stats_update(stat->cpu);
len += snprintf(buf + len, PAGE_SIZE - len, " From : To\n");
len += snprintf(buf + len, PAGE_SIZE - len, " : ");
for (i = 0; i < stat->state_num; i++) {
if (len >= PAGE_SIZE)
break;
len += snprintf(buf + len, PAGE_SIZE - len, "%9u:\t",
len += snprintf(buf + len, PAGE_SIZE - len, "%9u ",
stat->freq_table[i]);
}
if (len >= PAGE_SIZE)
return len;
len += snprintf(buf + len, PAGE_SIZE - len, "\n");
for (i = 0; i < stat->state_num; i++) {
if (len >= PAGE_SIZE)
break;
len += snprintf(buf + len, PAGE_SIZE - len, "%9u: ",
stat->freq_table[i]);
for (j = 0; j < stat->state_num; j++) {
if (len >= PAGE_SIZE)
break;
len += snprintf(buf + len, PAGE_SIZE - len, "%u\t",
len += snprintf(buf + len, PAGE_SIZE - len, "%9u ",
stat->trans_table[i*stat->max_state+j]);
}
len += snprintf(buf + len, PAGE_SIZE - len, "\n");
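/*
* Layout produced above, with made-up counts for two states:
*    From  :    To
*          :   2000000   1000000
*   2000000:         0        13
*   1000000:        12         0
*/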
@ -197,7 +210,7 @@ cpufreq_stats_create_table (struct cpufreq_policy *policy,
count++;
}
alloc_size = count * sizeof(int) + count * sizeof(long long);
alloc_size = count * sizeof(int) + count * sizeof(cputime64_t);
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
alloc_size += count * count * sizeof(int);
@ -224,7 +237,7 @@ cpufreq_stats_create_table (struct cpufreq_policy *policy,
}
stat->state_num = j;
spin_lock(&cpufreq_stats_lock);
stat->last_time = jiffies;
stat->last_time = get_jiffies_64();
stat->last_index = freq_table_get_index(stat, policy->cur);
spin_unlock(&cpufreq_stats_lock);
cpufreq_cpu_put(data);

View file

@ -11,6 +11,7 @@
* published by the Free Software Foundation.
*/
#include <linux/config.h>
#include <linux/acpi.h>
#include <linux/console.h>
#include <linux/efi.h>

View file

@ -2,6 +2,7 @@
* i2c-ali1563.c - i2c driver for the ALi 1563 Southbridge
*
* Copyright (C) 2004 Patrick Mochel
* 2005 Rudolf Marek <r.marek@sh.cvut.cz>
*
* The 1563 southbridge is deceptively similar to the 1533, with a
* few notable exceptions. One of those happens to be the fact they
@ -57,10 +58,11 @@
#define HST_CNTL2_BLOCK 0x05
#define HST_CNTL2_SIZEMASK 0x38
static unsigned short ali1563_smba;
static int ali1563_transaction(struct i2c_adapter * a)
static int ali1563_transaction(struct i2c_adapter * a, int size)
{
u32 data;
int timeout;
@ -73,7 +75,7 @@ static int ali1563_transaction(struct i2c_adapter * a)
data = inb_p(SMB_HST_STS);
if (data & HST_STS_BAD) {
dev_warn(&a->dev,"ali1563: Trying to reset busy device\n");
dev_err(&a->dev, "ali1563: Trying to reset busy device\n");
outb_p(data | HST_STS_BAD,SMB_HST_STS);
data = inb_p(SMB_HST_STS);
if (data & HST_STS_BAD)
@ -94,19 +96,31 @@ static int ali1563_transaction(struct i2c_adapter * a)
if (timeout && !(data & HST_STS_BAD))
return 0;
dev_warn(&a->dev, "SMBus Error: %s%s%s%s%s\n",
timeout ? "Timeout " : "",
data & HST_STS_FAIL ? "Transaction Failed " : "",
data & HST_STS_BUSERR ? "No response or Bus Collision " : "",
data & HST_STS_DEVERR ? "Device Error " : "",
!(data & HST_STS_DONE) ? "Transaction Never Finished " : "");
if (!(data & HST_STS_DONE))
if (!timeout) {
dev_err(&a->dev, "Timeout - Trying to KILL transaction!\n");
/* Issue 'kill' to host controller */
outb_p(HST_CNTL2_KILL,SMB_HST_CNTL2);
else
/* Issue timeout to reset all devices on bus */
data = inb_p(SMB_HST_STS);
}
/* device error - no response, ignore the autodetection case */
if ((data & HST_STS_DEVERR) && (size != HST_CNTL2_QUICK)) {
dev_err(&a->dev, "Device error!\n");
}
/* bus collision */
if (data & HST_STS_BUSERR) {
dev_err(&a->dev, "Bus collision!\n");
/* Issue timeout, hoping it helps */
outb_p(HST_CNTL1_TIMEOUT,SMB_HST_CNTL1);
}
if (data & HST_STS_FAIL) {
dev_err(&a->dev, "Cleaning fail after KILL!\n");
outb_p(0x0,SMB_HST_CNTL2);
}
return -1;
}
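/*
* Recovery summary for the paths above: a timeout issues HST_CNTL2_KILL
* to the host controller, a bus collision issues HST_CNTL1_TIMEOUT to
* reset the devices on the bus, and a latched FAIL status is cleared by
* writing 0 to SMB_HST_CNTL2; DEVERR is only logged, and is skipped for
* QUICK transactions since probing expects that failure mode.
*/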
@ -149,7 +163,7 @@ static int ali1563_block_start(struct i2c_adapter * a)
if (timeout && !(data & HST_STS_BAD))
return 0;
dev_warn(&a->dev, "SMBus Error: %s%s%s%s%s\n",
dev_err(&a->dev, "SMBus Error: %s%s%s%s%s\n",
timeout ? "Timeout " : "",
data & HST_STS_FAIL ? "Transaction Failed " : "",
data & HST_STS_BUSERR ? "No response or Bus Collision " : "",
@ -242,13 +256,15 @@ static s32 ali1563_access(struct i2c_adapter * a, u16 addr,
}
outb_p(((addr & 0x7f) << 1) | (rw & 0x01), SMB_HST_ADD);
outb_p(inb_p(SMB_HST_CNTL2) | (size << 3), SMB_HST_CNTL2);
outb_p((inb_p(SMB_HST_CNTL2) & ~HST_CNTL2_SIZEMASK) | (size << 3), SMB_HST_CNTL2);
/* Write the command register */
switch(size) {
case HST_CNTL2_BYTE:
if (rw== I2C_SMBUS_WRITE)
outb_p(cmd, SMB_HST_CMD);
/* Beware it uses DAT0 register and not CMD! */
outb_p(cmd, SMB_HST_DAT0);
break;
case HST_CNTL2_BYTE_DATA:
outb_p(cmd, SMB_HST_CMD);
@ -268,7 +284,7 @@ static s32 ali1563_access(struct i2c_adapter * a, u16 addr,
goto Done;
}
if ((error = ali1563_transaction(a)))
if ((error = ali1563_transaction(a, size)))
goto Done;
if ((rw == I2C_SMBUS_WRITE) || (size == HST_CNTL2_QUICK))

View file

@ -1936,7 +1936,7 @@ static ide_startstop_t cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
* NOTE! The "len" and "addr" checks should possibly have
* separate masks.
*/
if ((rq->data_len & mask) || (addr & mask))
if ((rq->data_len & 15) || (addr & mask))
info->dma = 0;
}

View file

@ -72,6 +72,7 @@ static struct amd_ide_chip {
{ PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2, 0x50, AMD_UDMA_133 },
{ PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_IDE, 0x50, AMD_UDMA_133 },
{ PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_IDE, 0x50, AMD_UDMA_133 },
{ PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_IDE, 0x50, AMD_UDMA_133 },
{ 0 }
};
@ -487,6 +488,7 @@ static ide_pci_device_t amd74xx_chipsets[] __devinitdata = {
/* 12 */ DECLARE_NV_DEV("NFORCE3-250-SATA2"),
/* 13 */ DECLARE_NV_DEV("NFORCE-CK804"),
/* 14 */ DECLARE_NV_DEV("NFORCE-MCP04"),
/* 15 */ DECLARE_NV_DEV("NFORCE-MCP51"),
};
static int __devinit amd74xx_probe(struct pci_dev *dev, const struct pci_device_id *id)
@ -521,6 +523,7 @@ static struct pci_device_id amd74xx_pci_tbl[] = {
#endif
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 13 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 14 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 15 },
{ 0, },
};
MODULE_DEVICE_TABLE(pci, amd74xx_pci_tbl);

View file

@ -54,7 +54,7 @@ static int atkbd_softraw = 1;
module_param_named(softraw, atkbd_softraw, bool, 0);
MODULE_PARM_DESC(softraw, "Use software generated rawmode");
static int atkbd_scroll = 1;
static int atkbd_scroll = 0;
module_param_named(scroll, atkbd_scroll, bool, 0);
MODULE_PARM_DESC(scroll, "Enable scroll-wheel on MS Office and similar keyboards");

View file

@ -374,29 +374,6 @@ static inline void do_kiss_params(struct baycom_state *bc,
}
/* --------------------------------------------------------------------- */
/*
* high performance HDLC encoder
* yes, it's ugly, but generates pretty good code
*/
#define ENCODEITERA(j) \
({ \
if (!(notbitstream & (0x1f0 << j))) \
goto stuff##j; \
encodeend##j: ; \
})
#define ENCODEITERB(j) \
({ \
stuff##j: \
bitstream &= ~(0x100 << j); \
bitbuf = (bitbuf & (((2 << j) << numbit) - 1)) | \
((bitbuf & ~(((2 << j) << numbit) - 1)) << 1); \
numbit++; \
notbitstream = ~bitstream; \
goto encodeend##j; \
})
static void encode_hdlc(struct baycom_state *bc)
{
@ -405,6 +382,7 @@ static void encode_hdlc(struct baycom_state *bc)
int pkt_len;
unsigned bitstream, notbitstream, bitbuf, numbit, crc;
unsigned char crcarr[2];
int j;
if (bc->hdlctx.bufcnt > 0)
return;
@ -429,24 +407,14 @@ static void encode_hdlc(struct baycom_state *bc)
pkt_len--;
if (!pkt_len)
bp = crcarr;
ENCODEITERA(0);
ENCODEITERA(1);
ENCODEITERA(2);
ENCODEITERA(3);
ENCODEITERA(4);
ENCODEITERA(5);
ENCODEITERA(6);
ENCODEITERA(7);
goto enditer;
ENCODEITERB(0);
ENCODEITERB(1);
ENCODEITERB(2);
ENCODEITERB(3);
ENCODEITERB(4);
ENCODEITERB(5);
ENCODEITERB(6);
ENCODEITERB(7);
enditer:
for (j = 0; j < 8; j++)
if (unlikely(!(notbitstream & (0x1f0 << j)))) {
bitstream &= ~(0x100 << j);
bitbuf = (bitbuf & (((2 << j) << numbit) - 1)) |
((bitbuf & ~(((2 << j) << numbit) - 1)) << 1);
numbit++;
notbitstream = ~bitstream;
}
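/*
* This loop is standard HDLC bit stuffing: !(notbitstream & (0x1f0 << j))
* means five consecutive 1 bits ended at position j, so a 0 bit is
* spliced into bitbuf to keep payload data from imitating the 0x7e flag.
*/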
numbit += 8;
while (numbit >= 8) {
*wp++ = bitbuf;
@ -610,37 +578,6 @@ static void do_rxpacket(struct net_device *dev)
bc->stats.rx_packets++;
}
#define DECODEITERA(j) \
({ \
if (!(notbitstream & (0x0fc << j))) /* flag or abort */ \
goto flgabrt##j; \
if ((bitstream & (0x1f8 << j)) == (0xf8 << j)) /* stuffed bit */ \
goto stuff##j; \
enditer##j: ; \
})
#define DECODEITERB(j) \
({ \
flgabrt##j: \
if (!(notbitstream & (0x1fc << j))) { /* abort received */ \
state = 0; \
goto enditer##j; \
} \
if ((bitstream & (0x1fe << j)) != (0x0fc << j)) /* flag received */ \
goto enditer##j; \
if (state) \
do_rxpacket(dev); \
bc->hdlcrx.bufcnt = 0; \
bc->hdlcrx.bufptr = bc->hdlcrx.buf; \
state = 1; \
numbits = 7-j; \
goto enditer##j; \
stuff##j: \
numbits--; \
bitbuf = (bitbuf & ((~0xff) << j)) | ((bitbuf & ~((~0xff) << j)) << 1); \
goto enditer##j; \
})
static int receive(struct net_device *dev, int cnt)
{
struct baycom_state *bc = netdev_priv(dev);
@ -649,6 +586,7 @@ static int receive(struct net_device *dev, int cnt)
unsigned char tmp[128];
unsigned char *cp;
int cnt2, ret = 0;
int j;
numbits = bc->hdlcrx.numbits;
state = bc->hdlcrx.state;
@ -669,24 +607,32 @@ static int receive(struct net_device *dev, int cnt)
bitbuf |= (*cp) << 8;
numbits += 8;
notbitstream = ~bitstream;
DECODEITERA(0);
DECODEITERA(1);
DECODEITERA(2);
DECODEITERA(3);
DECODEITERA(4);
DECODEITERA(5);
DECODEITERA(6);
DECODEITERA(7);
goto enddec;
DECODEITERB(0);
DECODEITERB(1);
DECODEITERB(2);
DECODEITERB(3);
DECODEITERB(4);
DECODEITERB(5);
DECODEITERB(6);
DECODEITERB(7);
enddec:
for (j = 0; j < 8; j++) {
/* flag or abort */
if (unlikely(!(notbitstream & (0x0fc << j)))) {
/* abort received */
if (!(notbitstream & (0x1fc << j)))
state = 0;
/* flag received */
else if ((bitstream & (0x1fe << j)) == (0x0fc << j)) {
if (state)
do_rxpacket(dev);
bc->hdlcrx.bufcnt = 0;
bc->hdlcrx.bufptr = bc->hdlcrx.buf;
state = 1;
numbits = 7-j;
}
}
/* stuffed bit */
else if (unlikely((bitstream & (0x1f8 << j)) == (0xf8 << j))) {
numbits--;
bitbuf = (bitbuf & ((~0xff) << j)) | ((bitbuf & ~((~0xff) << j)) << 1);
}
}
while (state && numbits >= 8) {
if (bc->hdlcrx.bufcnt >= TXBUFFER_SIZE) {
state = 0;

View file

@ -1274,6 +1274,9 @@ static int el3_close(struct net_device *dev)
spin_lock_irqsave(&lp->window_lock, flags);
update_stats(dev);
spin_unlock_irqrestore(&lp->window_lock, flags);
/* force interrupts off */
outw(SetIntrEnb | 0x0000, ioaddr + EL3_CMD);
}
link->open--;

View file

@ -1585,8 +1585,8 @@ rtl8169_hw_start(struct net_device *dev)
RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
RTL_W8(EarlyTxThres, EarlyTxThld);
/* For gigabit rtl8169, MTU + header + CRC + VLAN */
RTL_W16(RxMaxSize, tp->rx_buf_sz);
/* Low hurts. Let's disable the filtering. */
RTL_W16(RxMaxSize, 16383);
/* Set Rx Config register */
i = rtl8169_rx_config |
@ -2127,6 +2127,11 @@ rtl8169_tx_interrupt(struct net_device *dev, struct rtl8169_private *tp,
}
}
static inline int rtl8169_fragmented_frame(u32 status)
{
return (status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag);
}
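/*
* A frame that fits in a single descriptor carries both FirstFrag and
* LastFrag in its status word, so the helper returns 0; any other
* combination marks part of a multi-descriptor (over-MTU) frame.
*/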
static inline void rtl8169_rx_csum(struct sk_buff *skb, struct RxDesc *desc)
{
u32 opts1 = le32_to_cpu(desc->opts1);
@ -2177,27 +2182,41 @@ rtl8169_rx_interrupt(struct net_device *dev, struct rtl8169_private *tp,
while (rx_left > 0) {
unsigned int entry = cur_rx % NUM_RX_DESC;
struct RxDesc *desc = tp->RxDescArray + entry;
u32 status;
rmb();
status = le32_to_cpu(tp->RxDescArray[entry].opts1);
status = le32_to_cpu(desc->opts1);
if (status & DescOwn)
break;
if (status & RxRES) {
printk(KERN_INFO "%s: Rx ERROR!!!\n", dev->name);
printk(KERN_INFO "%s: Rx ERROR. status = %08x\n",
dev->name, status);
tp->stats.rx_errors++;
if (status & (RxRWT | RxRUNT))
tp->stats.rx_length_errors++;
if (status & RxCRC)
tp->stats.rx_crc_errors++;
rtl8169_mark_to_asic(desc, tp->rx_buf_sz);
} else {
struct RxDesc *desc = tp->RxDescArray + entry;
struct sk_buff *skb = tp->Rx_skbuff[entry];
int pkt_size = (status & 0x00001FFF) - 4;
void (*pci_action)(struct pci_dev *, dma_addr_t,
size_t, int) = pci_dma_sync_single_for_device;
/*
* The driver does not support incoming fragmented
* frames. They are seen as a symptom of over-mtu
* sized frames.
*/
if (unlikely(rtl8169_fragmented_frame(status))) {
tp->stats.rx_dropped++;
tp->stats.rx_length_errors++;
rtl8169_mark_to_asic(desc, tp->rx_buf_sz);
goto move_on;
}
rtl8169_rx_csum(skb, desc);
pci_dma_sync_single_for_cpu(tp->pci_dev,
@ -2224,7 +2243,7 @@ rtl8169_rx_interrupt(struct net_device *dev, struct rtl8169_private *tp,
tp->stats.rx_bytes += pkt_size;
tp->stats.rx_packets++;
}
move_on:
cur_rx++;
rx_left--;
}

View file

@ -100,35 +100,8 @@ static int sh_debug; /* Debug flag */
#define SHAPER_BANNER "CymruNet Traffic Shaper BETA 0.04 for Linux 2.1\n"
/*
* Locking
*/
static int shaper_lock(struct shaper *sh)
{
/*
* Lock in an interrupt must fail
*/
while (test_and_set_bit(0, &sh->locked))
{
if (!in_interrupt())
sleep_on(&sh->wait_queue);
else
return 0;
}
return 1;
}
static void shaper_kick(struct shaper *sh);
static void shaper_unlock(struct shaper *sh)
{
clear_bit(0, &sh->locked);
wake_up(&sh->wait_queue);
shaper_kick(sh);
}
/*
* Compute clocks on a buffer
*/
@ -157,17 +130,15 @@ static void shaper_setspeed(struct shaper *shaper, int bitspersec)
* Throw a frame at a shaper.
*/
static int shaper_qframe(struct shaper *shaper, struct sk_buff *skb)
static int shaper_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct shaper *shaper = dev->priv;
struct sk_buff *ptr;
/*
* Get ready to work on this shaper. Lock may fail if it's
* called from an interrupt while already locked.
*/
if(!shaper_lock(shaper))
return -1;
if (down_trylock(&shaper->sem))
return -1;
ptr=shaper->sendq.prev;
/*
@ -260,7 +231,8 @@ static int shaper_qframe(struct shaper *shaper, struct sk_buff *skb)
dev_kfree_skb(ptr);
shaper->stats.collisions++;
}
shaper_unlock(shaper);
shaper_kick(shaper);
up(&shaper->sem);
return 0;
}
@ -297,8 +269,13 @@ static void shaper_queue_xmit(struct shaper *shaper, struct sk_buff *skb)
static void shaper_timer(unsigned long data)
{
struct shaper *sh=(struct shaper *)data;
shaper_kick(sh);
struct shaper *shaper = (struct shaper *)data;
if (!down_trylock(&shaper->sem)) {
shaper_kick(shaper);
up(&shaper->sem);
} else
mod_timer(&shaper->timer, jiffies);
}
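/*
* Timer callbacks run in softirq context and must not sleep, hence
* down_trylock() rather than down(); if the semaphore is contended the
* timer simply re-arms itself for the next jiffy and tries again.
*/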
/*
@ -310,19 +287,6 @@ static void shaper_kick(struct shaper *shaper)
{
struct sk_buff *skb;
/*
* Shaper unlock will kick
*/
if (test_and_set_bit(0, &shaper->locked))
{
if(sh_debug)
printk("Shaper locked.\n");
mod_timer(&shaper->timer, jiffies);
return;
}
/*
* Walk the list (may be empty)
*/
@ -364,8 +328,6 @@ static void shaper_kick(struct shaper *shaper)
if(skb!=NULL)
mod_timer(&shaper->timer, SHAPERCB(skb)->shapeclock);
clear_bit(0, &shaper->locked);
}
@ -376,14 +338,12 @@ static void shaper_kick(struct shaper *shaper)
static void shaper_flush(struct shaper *shaper)
{
struct sk_buff *skb;
if(!shaper_lock(shaper))
{
printk(KERN_ERR "shaper: shaper_flush() called by an irq!\n");
return;
}
down(&shaper->sem);
while((skb=skb_dequeue(&shaper->sendq))!=NULL)
dev_kfree_skb(skb);
shaper_unlock(shaper);
shaper_kick(shaper);
up(&shaper->sem);
}
/*
@ -426,13 +386,6 @@ static int shaper_close(struct net_device *dev)
* ARP and other resolutions and not before.
*/
static int shaper_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct shaper *sh=dev->priv;
return shaper_qframe(sh, skb);
}
static struct net_device_stats *shaper_get_stats(struct net_device *dev)
{
struct shaper *sh=dev->priv;
@ -623,7 +576,6 @@ static void shaper_init_priv(struct net_device *dev)
init_timer(&sh->timer);
sh->timer.function=shaper_timer;
sh->timer.data=(unsigned long)sh;
init_waitqueue_head(&sh->wait_queue);
}
/*

View file

@ -7,7 +7,12 @@
* Copyright (C) 2005 Broadcom Corporation.
*
* Firmware is:
* Copyright (C) 2000-2003 Broadcom Corporation.
* Derived from proprietary unpublished source code,
* Copyright (C) 2000-2003 Broadcom Corporation.
*
* Permission is hereby granted for the distribution of this firmware
* data in hexadecimal or equivalent format, provided this copyright
* notice is accompanying it.
*/
#include <linux/config.h>
@ -61,8 +66,8 @@
#define DRV_MODULE_NAME "tg3"
#define PFX DRV_MODULE_NAME ": "
#define DRV_MODULE_VERSION "3.29"
#define DRV_MODULE_RELDATE "May 23, 2005"
#define DRV_MODULE_VERSION "3.30"
#define DRV_MODULE_RELDATE "June 6, 2005"
#define TG3_DEF_MAC_MODE 0
#define TG3_DEF_RX_MODE 0
@ -8555,6 +8560,16 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
tp->led_ctrl = LED_CTRL_MODE_MAC;
/* Default to PHY_1_MODE if 0 (MAC_MODE) is
* read on some older 5700/5701 bootcode.
*/
if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
ASIC_REV_5700 ||
GET_ASIC_REV(tp->pci_chip_rev_id) ==
ASIC_REV_5701)
tp->led_ctrl = LED_CTRL_MODE_PHY_1;
break;
case SHASTA_EXT_LED_SHARED:

View file

@ -1,7 +1,7 @@
/*
* CompactPCI Hot Plug Driver
*
* Copyright (C) 2002 SOMA Networks, Inc.
* Copyright (C) 2002,2005 SOMA Networks, Inc.
* Copyright (C) 2001 Greg Kroah-Hartman (greg@kroah.com)
* Copyright (C) 2001 IBM Corp.
*
@ -45,10 +45,10 @@
#define dbg(format, arg...) \
do { \
if(cpci_debug) \
if (cpci_debug) \
printk (KERN_DEBUG "%s: " format "\n", \
MY_NAME , ## arg); \
} while(0)
} while (0)
#define err(format, arg...) printk(KERN_ERR "%s: " format "\n", MY_NAME , ## arg)
#define info(format, arg...) printk(KERN_INFO "%s: " format "\n", MY_NAME , ## arg)
#define warn(format, arg...) printk(KERN_WARNING "%s: " format "\n", MY_NAME , ## arg)
@ -111,10 +111,8 @@ enable_slot(struct hotplug_slot *hotplug_slot)
dbg("%s - physical_slot = %s", __FUNCTION__, hotplug_slot->name);
if(controller->ops->set_power) {
if (controller->ops->set_power)
retval = controller->ops->set_power(slot, 1);
}
return retval;
}
@ -126,37 +124,41 @@ disable_slot(struct hotplug_slot *hotplug_slot)
dbg("%s - physical_slot = %s", __FUNCTION__, hotplug_slot->name);
down_write(&list_rwsem);
/* Unconfigure device */
dbg("%s - unconfiguring slot %s",
__FUNCTION__, slot->hotplug_slot->name);
if((retval = cpci_unconfigure_slot(slot))) {
if ((retval = cpci_unconfigure_slot(slot))) {
err("%s - could not unconfigure slot %s",
__FUNCTION__, slot->hotplug_slot->name);
return retval;
goto disable_error;
}
dbg("%s - finished unconfiguring slot %s",
__FUNCTION__, slot->hotplug_slot->name);
/* Clear EXT (by setting it) */
if(cpci_clear_ext(slot)) {
if (cpci_clear_ext(slot)) {
err("%s - could not clear EXT for slot %s",
__FUNCTION__, slot->hotplug_slot->name);
retval = -ENODEV;
goto disable_error;
}
cpci_led_on(slot);
if(controller->ops->set_power) {
retval = controller->ops->set_power(slot, 0);
}
if (controller->ops->set_power)
if ((retval = controller->ops->set_power(slot, 0)))
goto disable_error;
if(update_adapter_status(slot->hotplug_slot, 0)) {
if (update_adapter_status(slot->hotplug_slot, 0))
warn("failure to update adapter file");
}
if(slot->extracting) {
if (slot->extracting) {
slot->extracting = 0;
atomic_dec(&extracting);
}
disable_error:
up_write(&list_rwsem);
return retval;
}
@ -165,9 +167,8 @@ cpci_get_power_status(struct slot *slot)
{
u8 power = 1;
if(controller->ops->get_power) {
if (controller->ops->get_power)
power = controller->ops->get_power(slot);
}
return power;
}
@ -237,9 +238,8 @@ cpci_hp_register_bus(struct pci_bus *bus, u8 first, u8 last)
int status = -ENOMEM;
int i;
if(!(controller && bus)) {
if (!(controller && bus))
return -ENODEV;
}
/*
* Create a structure for each slot, and register that slot
@ -316,32 +316,30 @@ int
cpci_hp_unregister_bus(struct pci_bus *bus)
{
struct slot *slot;
struct list_head *tmp;
struct list_head *next;
int status;
struct slot *tmp;
int status = 0;
down_write(&list_rwsem);
if(!slots) {
if (!slots) {
up_write(&list_rwsem);
return -1;
}
list_for_each_safe(tmp, next, &slot_list) {
slot = list_entry(tmp, struct slot, slot_list);
if(slot->bus == bus) {
dbg("deregistering slot %s", slot->hotplug_slot->name);
status = pci_hp_deregister(slot->hotplug_slot);
if(status) {
err("pci_hp_deregister failed with error %d",
status);
return status;
}
list_for_each_entry_safe(slot, tmp, &slot_list, slot_list) {
if (slot->bus == bus) {
list_del(&slot->slot_list);
slots--;
dbg("deregistering slot %s", slot->hotplug_slot->name);
status = pci_hp_deregister(slot->hotplug_slot);
if (status) {
err("pci_hp_deregister failed with error %d",
status);
break;
}
}
}
up_write(&list_rwsem);
return 0;
return status;
}
/* This is the interrupt mode interrupt handler */
@ -351,7 +349,7 @@ cpci_hp_intr(int irq, void *data, struct pt_regs *regs)
dbg("entered cpci_hp_intr");
/* Check to see if it was our interrupt */
if((controller->irq_flags & SA_SHIRQ) &&
if ((controller->irq_flags & SA_SHIRQ) &&
!controller->ops->check_irq(controller->dev_id)) {
dbg("exited cpci_hp_intr, not our interrupt");
return IRQ_NONE;
@ -373,38 +371,30 @@ cpci_hp_intr(int irq, void *data, struct pt_regs *regs)
* INS bits of the cold-inserted devices.
*/
static int
init_slots(void)
init_slots(int clear_ins)
{
struct slot *slot;
struct list_head *tmp;
struct pci_dev* dev;
dbg("%s - enter", __FUNCTION__);
down_read(&list_rwsem);
if(!slots) {
if (!slots) {
up_read(&list_rwsem);
return -1;
}
list_for_each(tmp, &slot_list) {
slot = list_entry(tmp, struct slot, slot_list);
list_for_each_entry(slot, &slot_list, slot_list) {
dbg("%s - looking at slot %s",
__FUNCTION__, slot->hotplug_slot->name);
if(cpci_check_and_clear_ins(slot)) {
if (clear_ins && cpci_check_and_clear_ins(slot))
dbg("%s - cleared INS for slot %s",
__FUNCTION__, slot->hotplug_slot->name);
dev = pci_find_slot(slot->bus->number, PCI_DEVFN(slot->number, 0));
if(dev) {
if(update_adapter_status(slot->hotplug_slot, 1)) {
warn("failure to update adapter file");
}
if(update_latch_status(slot->hotplug_slot, 1)) {
warn("failure to update latch file");
}
slot->dev = dev;
} else {
err("%s - no driver attached to device in slot %s",
__FUNCTION__, slot->hotplug_slot->name);
}
dev = pci_get_slot(slot->bus, PCI_DEVFN(slot->number, 0));
if (dev) {
if (update_adapter_status(slot->hotplug_slot, 1))
warn("failure to update adapter file");
if (update_latch_status(slot->hotplug_slot, 1))
warn("failure to update latch file");
slot->dev = dev;
}
}
up_read(&list_rwsem);
@ -416,26 +406,28 @@ static int
check_slots(void)
{
struct slot *slot;
struct list_head *tmp;
int extracted;
int inserted;
u16 hs_csr;
down_read(&list_rwsem);
if(!slots) {
if (!slots) {
up_read(&list_rwsem);
err("no slots registered, shutting down");
return -1;
}
extracted = inserted = 0;
list_for_each(tmp, &slot_list) {
slot = list_entry(tmp, struct slot, slot_list);
list_for_each_entry(slot, &slot_list, slot_list) {
dbg("%s - looking at slot %s",
__FUNCTION__, slot->hotplug_slot->name);
if(cpci_check_and_clear_ins(slot)) {
/* Some broken hardware (e.g. PLX 9054AB) asserts ENUM# twice... */
if(slot->dev) {
warn("slot %s already inserted", slot->hotplug_slot->name);
if (cpci_check_and_clear_ins(slot)) {
/*
* Some broken hardware (e.g. PLX 9054AB) asserts
* ENUM# twice...
*/
if (slot->dev) {
warn("slot %s already inserted",
slot->hotplug_slot->name);
inserted++;
continue;
}
@ -452,7 +444,7 @@ check_slots(void)
/* Configure device */
dbg("%s - configuring slot %s",
__FUNCTION__, slot->hotplug_slot->name);
if(cpci_configure_slot(slot)) {
if (cpci_configure_slot(slot)) {
err("%s - could not configure slot %s",
__FUNCTION__, slot->hotplug_slot->name);
continue;
@ -465,13 +457,11 @@ check_slots(void)
dbg("%s - slot %s HS_CSR (2) = %04x",
__FUNCTION__, slot->hotplug_slot->name, hs_csr);
if(update_latch_status(slot->hotplug_slot, 1)) {
if (update_latch_status(slot->hotplug_slot, 1))
warn("failure to update latch file");
}
if(update_adapter_status(slot->hotplug_slot, 1)) {
if (update_adapter_status(slot->hotplug_slot, 1))
warn("failure to update adapter file");
}
cpci_led_off(slot);
@ -481,7 +471,7 @@ check_slots(void)
__FUNCTION__, slot->hotplug_slot->name, hs_csr);
inserted++;
} else if(cpci_check_ext(slot)) {
} else if (cpci_check_ext(slot)) {
/* Process extraction request */
dbg("%s - slot %s extracted",
__FUNCTION__, slot->hotplug_slot->name);
@ -491,27 +481,25 @@ check_slots(void)
dbg("%s - slot %s HS_CSR = %04x",
__FUNCTION__, slot->hotplug_slot->name, hs_csr);
if(!slot->extracting) {
if(update_latch_status(slot->hotplug_slot, 0)) {
if (!slot->extracting) {
if (update_latch_status(slot->hotplug_slot, 0)) {
warn("failure to update latch file");
}
atomic_inc(&extracting);
slot->extracting = 1;
atomic_inc(&extracting);
}
extracted++;
} else if(slot->extracting) {
} else if (slot->extracting) {
hs_csr = cpci_get_hs_csr(slot);
if(hs_csr == 0xffff) {
if (hs_csr == 0xffff) {
/*
* Hmmm, we're likely hosed at this point, should we
* bother trying to tell the driver or not?
*/
err("card in slot %s was improperly removed",
slot->hotplug_slot->name);
if(update_adapter_status(slot->hotplug_slot, 0)) {
if (update_adapter_status(slot->hotplug_slot, 0))
warn("failure to update adapter file");
}
slot->extracting = 0;
atomic_dec(&extracting);
}
@ -520,10 +508,9 @@ check_slots(void)
up_read(&list_rwsem);
dbg("inserted=%d, extracted=%d, extracting=%d",
inserted, extracted, atomic_read(&extracting));
if(inserted || extracted) {
if (inserted || extracted)
return extracted;
}
else if(!atomic_read(&extracting)) {
else if (!atomic_read(&extracting)) {
err("cannot find ENUM# source, shutting down");
return -1;
}
@ -541,12 +528,12 @@ event_thread(void *data)
unlock_kernel();
dbg("%s - event thread started", __FUNCTION__);
while(1) {
while (1) {
dbg("event thread sleeping");
down_interruptible(&event_semaphore);
dbg("event thread woken, thread_finished = %d",
thread_finished);
if(thread_finished || signal_pending(current))
if (thread_finished || signal_pending(current))
break;
do {
rc = check_slots();
@ -558,7 +545,9 @@ event_thread(void *data)
thread_finished = 1;
break;
}
} while(atomic_read(&extracting) != 0);
} while (atomic_read(&extracting) && !thread_finished);
if (thread_finished)
break;
/* Re-enable ENUM# interrupt */
dbg("%s - re-enabling irq", __FUNCTION__);
@ -579,21 +568,21 @@ poll_thread(void *data)
daemonize("cpci_hp_polld");
unlock_kernel();
while(1) {
if(thread_finished || signal_pending(current))
while (1) {
if (thread_finished || signal_pending(current))
break;
if(controller->ops->query_enum()) {
if (controller->ops->query_enum()) {
do {
rc = check_slots();
if(rc > 0) {
if (rc > 0) {
/* Give userspace a chance to handle extraction */
msleep(500);
} else if(rc < 0) {
} else if (rc < 0) {
dbg("%s - error checking slots", __FUNCTION__);
thread_finished = 1;
break;
}
} while(atomic_read(&extracting) != 0);
} while (atomic_read(&extracting) && !thread_finished);
}
msleep(100);
}
@ -612,12 +601,11 @@ cpci_start_thread(void)
init_MUTEX_LOCKED(&thread_exit);
thread_finished = 0;
if(controller->irq) {
if (controller->irq)
pid = kernel_thread(event_thread, NULL, 0);
} else {
else
pid = kernel_thread(poll_thread, NULL, 0);
}
if(pid < 0) {
if (pid < 0) {
err("Can't start up our thread");
return -1;
}
@ -630,9 +618,8 @@ cpci_stop_thread(void)
{
thread_finished = 1;
dbg("thread finish command given");
if(controller->irq) {
if (controller->irq)
up(&event_semaphore);
}
dbg("wait for thread to exit");
down(&thread_exit);
}
@ -642,45 +629,67 @@ cpci_hp_register_controller(struct cpci_hp_controller *new_controller)
{
int status = 0;
if(!controller) {
controller = new_controller;
if(controller->irq) {
if(request_irq(controller->irq,
cpci_hp_intr,
controller->irq_flags,
MY_NAME, controller->dev_id)) {
err("Can't get irq %d for the hotplug cPCI controller", controller->irq);
status = -ENODEV;
}
dbg("%s - acquired controller irq %d", __FUNCTION__,
controller->irq);
if (controller)
return -1;
if (!(new_controller && new_controller->ops))
return -EINVAL;
if (new_controller->irq) {
if (!(new_controller->ops->enable_irq &&
new_controller->ops->disable_irq))
status = -EINVAL;
if (request_irq(new_controller->irq,
cpci_hp_intr,
new_controller->irq_flags,
MY_NAME,
new_controller->dev_id)) {
err("Can't get irq %d for the hotplug cPCI controller",
new_controller->irq);
status = -ENODEV;
}
} else {
err("cPCI hotplug controller already registered");
status = -1;
dbg("%s - acquired controller irq %d",
__FUNCTION__, new_controller->irq);
}
if (!status)
controller = new_controller;
return status;
}
static void
cleanup_slots(void)
{
struct slot *slot;
struct slot *tmp;
/*
* Unregister all of our slots with the pci_hotplug subsystem,
* and free up all memory that we had allocated.
*/
down_write(&list_rwsem);
if (!slots)
goto cleanup_null;
list_for_each_entry_safe(slot, tmp, &slot_list, slot_list) {
list_del(&slot->slot_list);
pci_hp_deregister(slot->hotplug_slot);
}
cleanup_null:
up_write(&list_rwsem);
return;
}
int
cpci_hp_unregister_controller(struct cpci_hp_controller *old_controller)
{
int status = 0;
if(controller) {
if(atomic_read(&extracting) != 0) {
return -EBUSY;
}
if(!thread_finished) {
if (controller) {
if (!thread_finished)
cpci_stop_thread();
}
if(controller->irq) {
if (controller->irq)
free_irq(controller->irq, controller->dev_id);
}
controller = NULL;
} else {
cleanup_slots();
} else
status = -ENODEV;
}
return status;
}
@ -691,32 +700,28 @@ cpci_hp_start(void)
int status;
dbg("%s - enter", __FUNCTION__);
if(!controller) {
if (!controller)
return -ENODEV;
}
down_read(&list_rwsem);
if(list_empty(&slot_list)) {
if (list_empty(&slot_list)) {
up_read(&list_rwsem);
return -ENODEV;
}
up_read(&list_rwsem);
if(first) {
status = init_slots();
if(status) {
return status;
}
status = init_slots(first);
if (first)
first = 0;
}
if (status)
return status;
status = cpci_start_thread();
if(status) {
if (status)
return status;
}
dbg("%s - thread started", __FUNCTION__);
if(controller->irq) {
if (controller->irq) {
/* Start enum interrupt processing */
dbg("%s - enabling irq", __FUNCTION__);
controller->ops->enable_irq();
@ -728,13 +733,9 @@ cpci_hp_start(void)
int
cpci_hp_stop(void)
{
if(!controller) {
if (!controller)
return -ENODEV;
}
if(atomic_read(&extracting) != 0) {
return -EBUSY;
}
if(controller->irq) {
if (controller->irq) {
/* Stop enum interrupt processing */
dbg("%s - disabling irq", __FUNCTION__);
controller->ops->disable_irq();
@ -743,34 +744,6 @@ cpci_hp_stop(void)
return 0;
}
static void __exit
cleanup_slots(void)
{
struct list_head *tmp;
struct slot *slot;
/*
* Unregister all of our slots with the pci_hotplug subsystem,
* and free up all memory that we had allocated.
*/
down_write(&list_rwsem);
if(!slots) {
goto null_cleanup;
}
list_for_each(tmp, &slot_list) {
slot = list_entry(tmp, struct slot, slot_list);
list_del(&slot->slot_list);
pci_hp_deregister(slot->hotplug_slot);
kfree(slot->hotplug_slot->info);
kfree(slot->hotplug_slot->name);
kfree(slot->hotplug_slot);
kfree(slot);
}
null_cleanup:
up_write(&list_rwsem);
return;
}
int __init
cpci_hotplug_init(int debug)
{
@ -784,7 +757,8 @@ cpci_hotplug_exit(void)
/*
* Clean everything up.
*/
cleanup_slots();
cpci_hp_stop();
cpci_hp_unregister_controller(controller);
}
EXPORT_SYMBOL_GPL(cpci_hp_register_controller);

View file

@ -1,7 +1,7 @@
/*
* CompactPCI Hot Plug Driver PCI functions
*
* Copyright (C) 2002 by SOMA Networks, Inc.
* Copyright (C) 2002,2005 by SOMA Networks, Inc.
*
* All rights reserved.
*
@ -38,10 +38,10 @@ extern int cpci_debug;
#define dbg(format, arg...) \
do { \
if(cpci_debug) \
if (cpci_debug) \
printk (KERN_DEBUG "%s: " format "\n", \
MY_NAME , ## arg); \
} while(0)
} while (0)
#define err(format, arg...) printk(KERN_ERR "%s: " format "\n", MY_NAME , ## arg)
#define info(format, arg...) printk(KERN_INFO "%s: " format "\n", MY_NAME , ## arg)
#define warn(format, arg...) printk(KERN_WARNING "%s: " format "\n", MY_NAME , ## arg)
@ -57,16 +57,15 @@ u8 cpci_get_attention_status(struct slot* slot)
hs_cap = pci_bus_find_capability(slot->bus,
slot->devfn,
PCI_CAP_ID_CHSWP);
if(!hs_cap) {
if (!hs_cap)
return 0;
}
if(pci_bus_read_config_word(slot->bus,
if (pci_bus_read_config_word(slot->bus,
slot->devfn,
hs_cap + 2,
&hs_csr)) {
&hs_csr))
return 0;
}
return hs_csr & 0x0008 ? 1 : 0;
}
@ -78,27 +77,22 @@ int cpci_set_attention_status(struct slot* slot, int status)
hs_cap = pci_bus_find_capability(slot->bus,
slot->devfn,
PCI_CAP_ID_CHSWP);
if(!hs_cap) {
if (!hs_cap)
return 0;
}
if(pci_bus_read_config_word(slot->bus,
if (pci_bus_read_config_word(slot->bus,
slot->devfn,
hs_cap + 2,
&hs_csr)) {
&hs_csr))
return 0;
}
if(status) {
if (status)
hs_csr |= HS_CSR_LOO;
} else {
else
hs_csr &= ~HS_CSR_LOO;
}
if(pci_bus_write_config_word(slot->bus,
if (pci_bus_write_config_word(slot->bus,
slot->devfn,
hs_cap + 2,
hs_csr)) {
hs_csr))
return 0;
}
return 1;
}
@ -110,16 +104,13 @@ u16 cpci_get_hs_csr(struct slot* slot)
hs_cap = pci_bus_find_capability(slot->bus,
slot->devfn,
PCI_CAP_ID_CHSWP);
if(!hs_cap) {
if (!hs_cap)
return 0xFFFF;
}
if(pci_bus_read_config_word(slot->bus,
if (pci_bus_read_config_word(slot->bus,
slot->devfn,
hs_cap + 2,
&hs_csr)) {
&hs_csr))
return 0xFFFF;
}
return hs_csr;
}
@@ -132,24 +123,22 @@ int cpci_check_and_clear_ins(struct slot* slot)
 	hs_cap = pci_bus_find_capability(slot->bus,
 					 slot->devfn,
 					 PCI_CAP_ID_CHSWP);
-	if(!hs_cap) {
+	if (!hs_cap)
 		return 0;
-	}
-	if(pci_bus_read_config_word(slot->bus,
+	if (pci_bus_read_config_word(slot->bus,
 				    slot->devfn,
 				    hs_cap + 2,
-				    &hs_csr)) {
+				    &hs_csr))
 		return 0;
-	}
-	if(hs_csr & HS_CSR_INS) {
+	if (hs_csr & HS_CSR_INS) {
 		/* Clear INS (by setting it) */
-		if(pci_bus_write_config_word(slot->bus,
+		if (pci_bus_write_config_word(slot->bus,
 					     slot->devfn,
 					     hs_cap + 2,
-					     hs_csr)) {
+					     hs_csr))
 			ins = 0;
-		}
-		ins = 1;
+		else
+			ins = 1;
 	}
 	return ins;
 }
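Two things are worth noting in this hunk. "Clear INS (by setting it)" is write-one-to-clear semantics: writing the just-read CSR value back with the INS bit still set clears the latch. And the old body set ins = 1 unconditionally even when that clearing write failed; the new else branch ties ins to the write's success. The idiom, sketched with a hypothetical helper (name and factoring are illustrative, not from this driver):

	/* write-1-to-clear: returns 1 if the latched status bit was cleared */
	static int hs_csr_w1c(struct slot *slot, int hs_cap, u16 hs_csr)
	{
		/* hs_csr still carries the latched bit; writing it back clears it */
		return pci_bus_write_config_word(slot->bus, slot->devfn,
						 hs_cap + 2, hs_csr) == 0;
	}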
@@ -163,18 +152,15 @@ int cpci_check_ext(struct slot* slot)
 	hs_cap = pci_bus_find_capability(slot->bus,
 					 slot->devfn,
 					 PCI_CAP_ID_CHSWP);
-	if(!hs_cap) {
+	if (!hs_cap)
 		return 0;
-	}
-	if(pci_bus_read_config_word(slot->bus,
+	if (pci_bus_read_config_word(slot->bus,
 				    slot->devfn,
 				    hs_cap + 2,
-				    &hs_csr)) {
+				    &hs_csr))
 		return 0;
-	}
-	if(hs_csr & HS_CSR_EXT) {
+	if (hs_csr & HS_CSR_EXT)
 		ext = 1;
-	}
 	return ext;
 }
@@ -186,23 +172,20 @@ int cpci_clear_ext(struct slot* slot)
 	hs_cap = pci_bus_find_capability(slot->bus,
 					 slot->devfn,
 					 PCI_CAP_ID_CHSWP);
-	if(!hs_cap) {
+	if (!hs_cap)
 		return -ENODEV;
-	}
-	if(pci_bus_read_config_word(slot->bus,
+	if (pci_bus_read_config_word(slot->bus,
 				    slot->devfn,
 				    hs_cap + 2,
-				    &hs_csr)) {
+				    &hs_csr))
 		return -ENODEV;
-	}
-	if(hs_csr & HS_CSR_EXT) {
+	if (hs_csr & HS_CSR_EXT) {
 		/* Clear EXT (by setting it) */
-		if(pci_bus_write_config_word(slot->bus,
+		if (pci_bus_write_config_word(slot->bus,
 					     slot->devfn,
 					     hs_cap + 2,
-					     hs_csr)) {
+					     hs_csr))
 			return -ENODEV;
-		}
 	}
 	return 0;
 }
@@ -215,18 +198,16 @@ int cpci_led_on(struct slot* slot)
 	hs_cap = pci_bus_find_capability(slot->bus,
 					 slot->devfn,
 					 PCI_CAP_ID_CHSWP);
-	if(!hs_cap) {
+	if (!hs_cap)
 		return -ENODEV;
-	}
-	if(pci_bus_read_config_word(slot->bus,
+	if (pci_bus_read_config_word(slot->bus,
 				    slot->devfn,
 				    hs_cap + 2,
-				    &hs_csr)) {
+				    &hs_csr))
 		return -ENODEV;
-	}
-	if((hs_csr & HS_CSR_LOO) != HS_CSR_LOO) {
+	if ((hs_csr & HS_CSR_LOO) != HS_CSR_LOO) {
 		hs_csr |= HS_CSR_LOO;
-		if(pci_bus_write_config_word(slot->bus,
+		if (pci_bus_write_config_word(slot->bus,
 					     slot->devfn,
 					     hs_cap + 2,
 					     hs_csr)) {
@@ -246,18 +227,16 @@ int cpci_led_off(struct slot* slot)
 	hs_cap = pci_bus_find_capability(slot->bus,
 					 slot->devfn,
 					 PCI_CAP_ID_CHSWP);
-	if(!hs_cap) {
+	if (!hs_cap)
 		return -ENODEV;
-	}
-	if(pci_bus_read_config_word(slot->bus,
+	if (pci_bus_read_config_word(slot->bus,
 				    slot->devfn,
 				    hs_cap + 2,
-				    &hs_csr)) {
+				    &hs_csr))
 		return -ENODEV;
-	}
-	if(hs_csr & HS_CSR_LOO) {
+	if (hs_csr & HS_CSR_LOO) {
 		hs_csr &= ~HS_CSR_LOO;
-		if(pci_bus_write_config_word(slot->bus,
+		if (pci_bus_write_config_word(slot->bus,
 					     slot->devfn,
 					     hs_cap + 2,
 					     hs_csr)) {
@@ -274,19 +253,6 @@ int cpci_led_off(struct slot* slot)
 /*
  * Device configuration functions
  */
-static void cpci_enable_device(struct pci_dev *dev)
-{
-	struct pci_bus *bus;
-	pci_enable_device(dev);
-	if(dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
-		bus = dev->subordinate;
-		list_for_each_entry(dev, &bus->devices, bus_list) {
-			cpci_enable_device(dev);
-		}
-	}
-}
 int cpci_configure_slot(struct slot* slot)
 {
 	unsigned char busnr;
@@ -294,14 +260,14 @@ int cpci_configure_slot(struct slot* slot)
 	dbg("%s - enter", __FUNCTION__);
-	if(slot->dev == NULL) {
+	if (slot->dev == NULL) {
 		dbg("pci_dev null, finding %02x:%02x:%x",
 		    slot->bus->number, PCI_SLOT(slot->devfn), PCI_FUNC(slot->devfn));
-		slot->dev = pci_find_slot(slot->bus->number, slot->devfn);
+		slot->dev = pci_get_slot(slot->bus, slot->devfn);
 	}
 	/* Still NULL? Well then scan for it! */
-	if(slot->dev == NULL) {
+	if (slot->dev == NULL) {
 		int n;
 		dbg("pci_dev still null");
@@ -311,10 +277,10 @@ int cpci_configure_slot(struct slot* slot)
 	 */
 	n = pci_scan_slot(slot->bus, slot->devfn);
 	dbg("%s: pci_scan_slot returned %d", __FUNCTION__, n);
-	if(n > 0)
+	if (n > 0)
 		pci_bus_add_devices(slot->bus);
-	slot->dev = pci_find_slot(slot->bus->number, slot->devfn);
-	if(slot->dev == NULL) {
+	slot->dev = pci_get_slot(slot->bus, slot->devfn);
+	if (slot->dev == NULL) {
 		err("Could not find PCI device for slot %02x", slot->number);
 		return 1;
 	}
@@ -329,8 +295,6 @@ int cpci_configure_slot(struct slot* slot)
 	pci_bus_assign_resources(slot->dev->bus);
-	cpci_enable_device(slot->dev);
 	dbg("%s - exit", __FUNCTION__);
 	return 0;
 }
@@ -341,15 +305,15 @@ int cpci_unconfigure_slot(struct slot* slot)
 	struct pci_dev *dev;
 	dbg("%s - enter", __FUNCTION__);
-	if(!slot->dev) {
+	if (!slot->dev) {
 		err("No device for slot %02x\n", slot->number);
 		return -ENODEV;
 	}
 	for (i = 0; i < 8; i++) {
-		dev = pci_find_slot(slot->bus->number,
+		dev = pci_get_slot(slot->bus,
 				    PCI_DEVFN(PCI_SLOT(slot->devfn), i));
-		if(dev) {
+		if (dev) {
 			pci_remove_bus_device(dev);
 			slot->dev = NULL;
 		}
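The pci_find_slot() to pci_get_slot() conversions in this file are more than a rename: pci_get_slot() returns its struct pci_dev with a reference held, so every successful lookup should eventually be balanced with pci_dev_put(). No matching put is visible in the context shown above; a minimal sketch of the balanced loop, with the surrounding error handling elided:

	/* sketch: reference-balanced use of pci_get_slot() */
	for (i = 0; i < 8; i++) {
		dev = pci_get_slot(slot->bus,
				   PCI_DEVFN(PCI_SLOT(slot->devfn), i));
		if (dev) {
			pci_remove_bus_device(dev);
			pci_dev_put(dev);	/* drop the lookup reference */
			slot->dev = NULL;
		}
	}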

View file

@@ -1626,7 +1626,7 @@ int shpchprm_set_hpp(
 	pci_bus->number = func->bus;
 	devfn = PCI_DEVFN(func->device, func->function);
-	ab = find_acpi_bridge_by_bus(acpi_bridges_head, ctrl->seg, ctrl->bus);
+	ab = find_acpi_bridge_by_bus(acpi_bridges_head, ctrl->seg, ctrl->slot_bus);
 	if (ab) {
 		if (ab->_hpp) {
@@ -1681,7 +1681,7 @@ void shpchprm_enable_card(
 		| PCI_COMMAND_IO | PCI_COMMAND_MEMORY;
 	bcmd = bcommand = bcommand | PCI_BRIDGE_CTL_NO_ISA;
-	ab = find_acpi_bridge_by_bus(acpi_bridges_head, ctrl->seg, ctrl->bus);
+	ab = find_acpi_bridge_by_bus(acpi_bridges_head, ctrl->seg, ctrl->slot_bus);
 	if (ab) {
 		if (ab->_hpp) {
 			if (ab->_hpp->enable_perr) {

View file

@@ -7173,6 +7173,7 @@
 	080f  Sentry5 DDR/SDR RAM Controller
 	0811  Sentry5 External Interface Core
 	0816  BCM3302 Sentry5 MIPS32 CPU
+	1600  NetXtreme BCM5752 Gigabit Ethernet PCI Express
 	1644  NetXtreme BCM5700 Gigabit Ethernet
 		1014 0277  Broadcom Vigil B5700 1000Base-T
 		1028 00d1  Broadcom BCM5700

View file

@@ -459,17 +459,6 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_APIC,
 #endif /* CONFIG_X86_IO_APIC */
-/*
- * Via 686A/B: The PCI_INTERRUPT_LINE register for the on-chip
- * devices, USB0/1, AC97, MC97, and ACPI, has an unusual feature:
- * when written, it makes an internal connection to the PIC.
- * For these devices, this register is defined to be 4 bits wide.
- * Normally this is fine. However for IO-APIC motherboards, or
- * non-x86 architectures (yes Via exists on PPC among other places),
- * we must mask the PCI_INTERRUPT_LINE value versus 0xf to get
- * interrupts delivered properly.
- */
 /*
  * FIXME: it is questionable that quirk_via_acpi
  * is needed. It shows up as an ISA bridge, and does not
@@ -492,28 +481,30 @@ static void __devinit quirk_via_acpi(struct pci_dev *d)
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_3, quirk_via_acpi );
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4, quirk_via_acpi );
-static void quirk_via_irqpic(struct pci_dev *dev)
+/*
+ * Via 686A/B: The PCI_INTERRUPT_LINE register for the on-chip
+ * devices, USB0/1, AC97, MC97, and ACPI, has an unusual feature:
+ * when written, it makes an internal connection to the PIC.
+ * For these devices, this register is defined to be 4 bits wide.
+ * Normally this is fine. However for IO-APIC motherboards, or
+ * non-x86 architectures (yes Via exists on PPC among other places),
+ * we must mask the PCI_INTERRUPT_LINE value versus 0xf to get
+ * interrupts delivered properly.
+ */
+static void quirk_via_irq(struct pci_dev *dev)
 {
 	u8 irq, new_irq;
-#ifdef CONFIG_X86_IO_APIC
-	if (nr_ioapics && !skip_ioapic_setup)
-		return;
-#endif
-#ifdef CONFIG_ACPI
-	if (acpi_irq_model != ACPI_IRQ_MODEL_PIC)
-		return;
-#endif
 	new_irq = dev->irq & 0xf;
 	pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
 	if (new_irq != irq) {
-		printk(KERN_INFO "PCI: Via PIC IRQ fixup for %s, from %d to %d\n",
+		printk(KERN_INFO "PCI: Via IRQ fixup for %s, from %d to %d\n",
 			pci_name(dev), irq, new_irq);
 		udelay(15);	/* unknown if delay really needed */
 		pci_write_config_byte(dev, PCI_INTERRUPT_LINE, new_irq);
 	}
 }
-DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_ANY_ID, quirk_via_irqpic);
+DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_ANY_ID, quirk_via_irq);
 /*
  * PIIX3 USB: We have to disable USB interrupts that are
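For context on when the renamed quirk runs: fixups registered with DECLARE_PCI_FIXUP_HEADER fire once as each device is discovered, while DECLARE_PCI_FIXUP_ENABLE hooks such as quirk_via_irq fire from the pci_enable_device() path, so the 4-bit PCI_INTERRUPT_LINE register is re-masked every time a driver enables one of these functions. The general shape of such a hook, as a minimal sketch (the hook name is hypothetical and the IDs are chosen only for illustration):

	/* sketch: a fixup run from pci_enable_device() */
	static void quirk_example(struct pci_dev *dev)
	{
		/* the register is 4 bits wide, so only IRQs 0-15 fit */
		pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq & 0xf);
	}
	DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_ANY_ID, quirk_example);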

View file

@@ -665,15 +665,6 @@ static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 	return ata_pci_init_one(pdev, port_info, n_ports);
 }
-/**
- *	piix_init -
- *
- *	LOCKING:
- *
- *	RETURNS:
- *
- */
 static int __init piix_init(void)
 {
 	int rc;
@@ -689,13 +680,6 @@ static int __init piix_init(void)
 	return 0;
 }
-/**
- *	piix_exit -
- *
- *	LOCKING:
- *
- */
 static void __exit piix_exit(void)
 {
 	pci_unregister_driver(&piix_pci_driver);
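The comment blocks deleted above were empty kernel-doc templates; a kernel-doc header earns its keep only when its sections are actually filled in. For comparison, a completed header for piix_init might read as follows (the wording is illustrative, not from the source):

	/**
	 *	piix_init - register the PIIX ATA driver with the PCI core
	 *
	 *	LOCKING:
	 *	None (process context at module load).
	 *
	 *	RETURNS:
	 *	Zero on success, or the negative errno returned by
	 *	pci_register_driver() on failure.
	 */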
