diff --git a/Documentation/DocBook/Makefile b/Documentation/DocBook/Makefile
index ab8300f6718..ee34ceb9ad5 100644
--- a/Documentation/DocBook/Makefile
+++ b/Documentation/DocBook/Makefile
@@ -8,7 +8,7 @@
DOCBOOKS := z8530book.xml mcabook.xml device-drivers.xml \
kernel-hacking.xml kernel-locking.xml deviceiobook.xml \
- procfs-guide.xml writing_usb_driver.xml networking.xml \
+ writing_usb_driver.xml networking.xml \
kernel-api.xml filesystems.xml lsm.xml usb.xml kgdb.xml \
gadget.xml libata.xml mtdnand.xml librs.xml rapidio.xml \
genericirq.xml s390-drivers.xml uio-howto.xml scsi.xml \
@@ -65,7 +65,7 @@ KERNELDOC = $(srctree)/scripts/kernel-doc
DOCPROC = $(objtree)/scripts/basic/docproc
XMLTOFLAGS = -m $(srctree)/Documentation/DocBook/stylesheet.xsl
-#XMLTOFLAGS += --skip-validation
+XMLTOFLAGS += --skip-validation
###
# DOCPROC is used for two purposes:
@@ -101,17 +101,6 @@ endif
# Changes in kernel-doc force a rebuild of all documentation
$(BOOKS): $(KERNELDOC)
-###
-# procfs guide uses a .c file as example code.
-# This requires an explicit dependency
-C-procfs-example = procfs_example.xml
-C-procfs-example2 = $(addprefix $(obj)/,$(C-procfs-example))
-$(obj)/procfs-guide.xml: $(C-procfs-example2)
-
-# List of programs to build
-##oops, this is a kernel module::hostprogs-y := procfs_example
-obj-m += procfs_example.o
-
# Tell kbuild to always build the programs
always := $(hostprogs-y)
@@ -238,7 +227,7 @@ clean-files := $(DOCBOOKS) \
$(patsubst %.xml, %.pdf, $(DOCBOOKS)) \
$(patsubst %.xml, %.html, $(DOCBOOKS)) \
$(patsubst %.xml, %.9, $(DOCBOOKS)) \
- $(C-procfs-example) $(index)
+ $(index)
clean-dirs := $(patsubst %.xml,%,$(DOCBOOKS)) man
diff --git a/Documentation/DocBook/procfs-guide.tmpl b/Documentation/DocBook/procfs-guide.tmpl
deleted file mode 100644
index 9eba4b7af73..00000000000
--- a/Documentation/DocBook/procfs-guide.tmpl
+++ /dev/null
@@ -1,626 +0,0 @@
-
-
-]>
-
-
-
- Linux Kernel Procfs Guide
-
-
-
- Erik
- (J.A.K.)
- Mouw
-
-
- mouw@nl.linux.org
-
-
-
-
-
- This software and documentation were written while working on the
- LART computing board
- (http://www.lartmaker.nl/),
- which was sponsored by the Delt University of Technology projects
- Mobile Multi-media Communications and Ubiquitous Communications.
-
-
-
-
-
-
- 1.0
- May 30, 2001
- Initial revision posted to linux-kernel
-
-
- 1.1
- June 3, 2001
- Revised after comments from linux-kernel
-
-
-
-
- 2001
- Erik Mouw
-
-
-
-
-
- This documentation is free software; you can redistribute it
- and/or modify it under the terms of the GNU General Public
- License as published by the Free Software Foundation; either
- version 2 of the License, or (at your option) any later
- version.
-
-
-
- This documentation is distributed in the hope that it will be
- useful, but WITHOUT ANY WARRANTY; without even the implied
- warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- PURPOSE. See the GNU General Public License for more details.
-
-
-
- You should have received a copy of the GNU General Public
- License along with this program; if not, write to the Free
- Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- MA 02111-1307 USA
-
-
-
- For more details see the file COPYING in the source
- distribution of Linux.
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- Preface
-
-
- This guide describes the use of the procfs file system from
- within the Linux kernel. The idea to write this guide came up on
- the #kernelnewbies IRC channel (see http://www.kernelnewbies.org/),
- when Jeff Garzik explained the use of procfs and forwarded me a
- message Alexander Viro wrote to the linux-kernel mailing list. I
- agreed to write it up nicely, so here it is.
-
-
-
- I'd like to thank Jeff Garzik
- jgarzik@pobox.com and Alexander Viro
- viro@parcelfarce.linux.theplanet.co.uk for their input,
- Tim Waugh twaugh@redhat.com for his Selfdocbook,
- and Marc Joosen marcj@historia.et.tudelft.nl for
- proofreading.
-
-
-
- Erik
-
-
-
-
-
-
-
- Introduction
-
-
- The /proc file system
- (procfs) is a special file system in the linux kernel. It's a
- virtual file system: it is not associated with a block device
- but exists only in memory. The files in the procfs are there to
- allow userland programs access to certain information from the
- kernel (like process information in /proc/[0-9]+/), but also for debug
- purposes (like /proc/ksyms).
-
-
-
- This guide describes the use of the procfs file system from
- within the Linux kernel. It starts by introducing all relevant
- functions to manage the files within the file system. After that
- it shows how to communicate with userland, and some tips and
- tricks will be pointed out. Finally a complete example will be
- shown.
-
-
-
- Note that the files in /proc/sys are sysctl files: they
- don't belong to procfs and are governed by a completely
- different API described in the Kernel API book.
-
-
-
-
-
-
-
- Managing procfs entries
-
-
- This chapter describes the functions that various kernel
- components use to populate the procfs with files, symlinks,
- device nodes, and directories.
-
-
-
- A minor note before we start: if you want to use any of the
- procfs functions, be sure to include the correct header file!
- This should be one of the first lines in your code:
-
-
-
-#include <linux/proc_fs.h>
-
-
-
-
-
-
- Creating a regular file
-
-
-
- struct proc_dir_entry* create_proc_entry
- const char* name
- mode_t mode
- struct proc_dir_entry* parent
-
-
-
-
- This function creates a regular file with the name
- name, file mode
- mode in the directory
- parent. To create a file in the root of
- the procfs, use NULL as
- parent parameter. When successful, the
- function will return a pointer to the freshly created
- struct proc_dir_entry; otherwise it
- will return NULL. describes how to do something useful with
- regular files.
-
-
-
- Note that it is specifically supported that you can pass a
- path that spans multiple directories. For example
- create_proc_entry("drivers/via0/info")
- will create the via0
- directory if necessary, with standard
- 0755 permissions.
-
-
-
- If you only want to be able to read the file, the function
- create_proc_read_entry described in may be used to create and initialise
- the procfs entry in one single call.
-
-
-
-
-
-
-
- Creating a symlink
-
-
-
- struct proc_dir_entry*
- proc_symlink const
- char* name
- struct proc_dir_entry*
- parent const
- char* dest
-
-
-
-
- This creates a symlink in the procfs directory
- parent that points from
- name to
- dest. This translates in userland to
- ln -s dest
- name.
-
-
-
-
- Creating a directory
-
-
-
- struct proc_dir_entry* proc_mkdir
- const char* name
- struct proc_dir_entry* parent
-
-
-
-
- Create a directory name in the procfs
- directory parent.
-
-
-
-
-
-
-
- Removing an entry
-
-
-
- void remove_proc_entry
- const char* name
- struct proc_dir_entry* parent
-
-
-
-
- Removes the entry name in the directory
- parent from the procfs. Entries are
- removed by their name, not by the
- struct proc_dir_entry returned by the
- various create functions. Note that this function doesn't
- recursively remove entries.
-
-
-
- Be sure to free the data entry from
- the struct proc_dir_entry before
- remove_proc_entry is called (that is: if
- there was some data allocated, of
- course). See for more information
- on using the data entry.
-
-
-
-
-
-
-
-
- Communicating with userland
-
-
- Instead of reading (or writing) information directly from
- kernel memory, procfs works with call back
- functions for files: functions that are called when
- a specific file is being read or written. Such functions have
- to be initialised after the procfs file is created by setting
- the read_proc and/or
- write_proc fields in the
- struct proc_dir_entry* that the
- function create_proc_entry returned:
-
-
-
-struct proc_dir_entry* entry;
-
-entry->read_proc = read_proc_foo;
-entry->write_proc = write_proc_foo;
-
-
-
- If you only want to use a the
- read_proc, the function
- create_proc_read_entry described in may be used to create and initialise the
- procfs entry in one single call.
-
-
-
-
-
- Reading data
-
-
- The read function is a call back function that allows userland
- processes to read data from the kernel. The read function
- should have the following format:
-
-
-
-
- int read_func
- char* buffer
- char** start
- off_t off
- int count
- int* peof
- void* data
-
-
-
-
- The read function should write its information into the
- buffer, which will be exactly
- PAGE_SIZE bytes long.
-
-
-
- The parameter
- peof should be used to signal that the
- end of the file has been reached by writing
- 1 to the memory location
- peof points to.
-
-
-
- The data
- parameter can be used to create a single call back function for
- several files, see .
-
-
-
- The rest of the parameters and the return value are described
- by a comment in fs/proc/generic.c as follows:
-
-
-
-
- You have three ways to return data:
-
-
-
-
- Leave *start = NULL. (This is the default.)
- Put the data of the requested offset at that
- offset within the buffer. Return the number (n)
- of bytes there are from the beginning of the
- buffer up to the last byte of data. If the
- number of supplied bytes (= n - offset) is
- greater than zero and you didn't signal eof
- and the reader is prepared to take more data
- you will be called again with the requested
- offset advanced by the number of bytes
- absorbed. This interface is useful for files
- no larger than the buffer.
-
-
-
-
- Set *start to an unsigned long value less than
- the buffer address but greater than zero.
- Put the data of the requested offset at the
- beginning of the buffer. Return the number of
- bytes of data placed there. If this number is
- greater than zero and you didn't signal eof
- and the reader is prepared to take more data
- you will be called again with the requested
- offset advanced by *start. This interface is
- useful when you have a large file consisting
- of a series of blocks which you want to count
- and return as wholes.
- (Hack by Paul.Russell@rustcorp.com.au)
-
-
-
-
- Set *start to an address within the buffer.
- Put the data of the requested offset at *start.
- Return the number of bytes of data placed there.
- If this number is greater than zero and you
- didn't signal eof and the reader is prepared to
- take more data you will be called again with the
- requested offset advanced by the number of bytes
- absorbed.
-
-
-
-
-
-
- shows how to use a read call back
- function.
-
-
-
-
-
-
-
- Writing data
-
-
- The write call back function allows a userland process to write
- data to the kernel, so it has some kind of control over the
- kernel. The write function should have the following format:
-
-
-
-
- int write_func
- struct file* file
- const char* buffer
- unsigned long count
- void* data
-
-
-
-
- The write function should read count
- bytes at maximum from the buffer. Note
- that the buffer doesn't live in the
- kernel's memory space, so it should first be copied to kernel
- space with copy_from_user. The
- file parameter is usually
- ignored. shows how to use the
- data parameter.
-
-
-
- Again, shows how to use this call back
- function.
-
-
-
-
-
-
-
- A single call back for many files
-
-
- When a large number of almost identical files is used, it's
- quite inconvenient to use a separate call back function for
- each file. A better approach is to have a single call back
- function that distinguishes between the files by using the
- data field in struct
- proc_dir_entry. First of all, the
- data field has to be initialised:
-
-
-
-struct proc_dir_entry* entry;
-struct my_file_data *file_data;
-
-file_data = kmalloc(sizeof(struct my_file_data), GFP_KERNEL);
-entry->data = file_data;
-
-
-
- The data field is a void
- *, so it can be initialised with anything.
-
-
-
- Now that the data field is set, the
- read_proc and
- write_proc can use it to distinguish
- between files because they get it passed into their
- data parameter:
-
-
-
-int foo_read_func(char *page, char **start, off_t off,
- int count, int *eof, void *data)
-{
- int len;
-
- if(data == file_data) {
- /* special case for this file */
- } else {
- /* normal processing */
- }
-
- return len;
-}
-
-
-
- Be sure to free the data data field
- when removing the procfs entry.
-
-
-
-
-
-
-
-
- Tips and tricks
-
-
-
-
-
- Convenience functions
-
-
-
- struct proc_dir_entry* create_proc_read_entry
- const char* name
- mode_t mode
- struct proc_dir_entry* parent
- read_proc_t* read_proc
- void* data
-
-
-
-
- This function creates a regular file in exactly the same way
- as create_proc_entry from does, but also allows to set the read
- function read_proc in one call. This
- function can set the data as well, like
- explained in .
-
-
-
-
-
-
- Modules
-
-
- If procfs is being used from within a module, be sure to set
- the owner field in the
- struct proc_dir_entry to
- THIS_MODULE.
-
-
-
-struct proc_dir_entry* entry;
-
-entry->owner = THIS_MODULE;
-
-
-
-
-
-
-
- Mode and ownership
-
-
- Sometimes it is useful to change the mode and/or ownership of
- a procfs entry. Here is an example that shows how to achieve
- that:
-
-
-
-struct proc_dir_entry* entry;
-
-entry->mode = S_IWUSR |S_IRUSR | S_IRGRP | S_IROTH;
-entry->uid = 0;
-entry->gid = 100;
-
-
-
-
-
-
-
-
-
- Example
-
-
-
-&procfsexample;
-
-
-
diff --git a/Documentation/DocBook/procfs_example.c b/Documentation/DocBook/procfs_example.c
deleted file mode 100644
index a5b11793b1e..00000000000
--- a/Documentation/DocBook/procfs_example.c
+++ /dev/null
@@ -1,201 +0,0 @@
-/*
- * procfs_example.c: an example proc interface
- *
- * Copyright (C) 2001, Erik Mouw (mouw@nl.linux.org)
- *
- * This file accompanies the procfs-guide in the Linux kernel
- * source. Its main use is to demonstrate the concepts and
- * functions described in the guide.
- *
- * This software has been developed while working on the LART
- * computing board (http://www.lartmaker.nl), which was sponsored
- * by the Delt University of Technology projects Mobile Multi-media
- * Communications and Ubiquitous Communications.
- *
- * This program is free software; you can redistribute
- * it and/or modify it under the terms of the GNU General
- * Public License as published by the Free Software
- * Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be
- * useful, but WITHOUT ANY WARRANTY; without even the implied
- * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public
- * License along with this program; if not, write to the
- * Free Software Foundation, Inc., 59 Temple Place,
- * Suite 330, Boston, MA 02111-1307 USA
- *
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/proc_fs.h>
-#include <linux/jiffies.h>
-#include <asm/uaccess.h>
-
-
-#define MODULE_VERS "1.0"
-#define MODULE_NAME "procfs_example"
-
-#define FOOBAR_LEN 8
-
-struct fb_data_t {
- char name[FOOBAR_LEN + 1];
- char value[FOOBAR_LEN + 1];
-};
-
-
-static struct proc_dir_entry *example_dir, *foo_file,
- *bar_file, *jiffies_file, *symlink;
-
-
-struct fb_data_t foo_data, bar_data;
-
-
-static int proc_read_jiffies(char *page, char **start,
- off_t off, int count,
- int *eof, void *data)
-{
- int len;
-
- len = sprintf(page, "jiffies = %ld\n",
- jiffies);
-
- return len;
-}
-
-
-static int proc_read_foobar(char *page, char **start,
- off_t off, int count,
- int *eof, void *data)
-{
- int len;
- struct fb_data_t *fb_data = (struct fb_data_t *)data;
-
- /* DON'T DO THAT - buffer overruns are bad */
- len = sprintf(page, "%s = '%s'\n",
- fb_data->name, fb_data->value);
-
- return len;
-}
-
-
-static int proc_write_foobar(struct file *file,
- const char *buffer,
- unsigned long count,
- void *data)
-{
- int len;
- struct fb_data_t *fb_data = (struct fb_data_t *)data;
-
- if(count > FOOBAR_LEN)
- len = FOOBAR_LEN;
- else
- len = count;
-
- if(copy_from_user(fb_data->value, buffer, len))
- return -EFAULT;
-
- fb_data->value[len] = '\0';
-
- return len;
-}
-
-
-static int __init init_procfs_example(void)
-{
- int rv = 0;
-
- /* create directory */
- example_dir = proc_mkdir(MODULE_NAME, NULL);
- if(example_dir == NULL) {
- rv = -ENOMEM;
- goto out;
- }
- /* create jiffies using convenience function */
- jiffies_file = create_proc_read_entry("jiffies",
- 0444, example_dir,
- proc_read_jiffies,
- NULL);
- if(jiffies_file == NULL) {
- rv = -ENOMEM;
- goto no_jiffies;
- }
-
- /* create foo and bar files using same callback
- * functions
- */
- foo_file = create_proc_entry("foo", 0644, example_dir);
- if(foo_file == NULL) {
- rv = -ENOMEM;
- goto no_foo;
- }
-
- strcpy(foo_data.name, "foo");
- strcpy(foo_data.value, "foo");
- foo_file->data = &foo_data;
- foo_file->read_proc = proc_read_foobar;
- foo_file->write_proc = proc_write_foobar;
-
- bar_file = create_proc_entry("bar", 0644, example_dir);
- if(bar_file == NULL) {
- rv = -ENOMEM;
- goto no_bar;
- }
-
- strcpy(bar_data.name, "bar");
- strcpy(bar_data.value, "bar");
- bar_file->data = &bar_data;
- bar_file->read_proc = proc_read_foobar;
- bar_file->write_proc = proc_write_foobar;
-
- /* create symlink */
- symlink = proc_symlink("jiffies_too", example_dir,
- "jiffies");
- if(symlink == NULL) {
- rv = -ENOMEM;
- goto no_symlink;
- }
-
- /* everything OK */
- printk(KERN_INFO "%s %s initialised\n",
- MODULE_NAME, MODULE_VERS);
- return 0;
-
-no_symlink:
- remove_proc_entry("bar", example_dir);
-no_bar:
- remove_proc_entry("foo", example_dir);
-no_foo:
- remove_proc_entry("jiffies", example_dir);
-no_jiffies:
- remove_proc_entry(MODULE_NAME, NULL);
-out:
- return rv;
-}
-
-
-static void __exit cleanup_procfs_example(void)
-{
- remove_proc_entry("jiffies_too", example_dir);
- remove_proc_entry("bar", example_dir);
- remove_proc_entry("foo", example_dir);
- remove_proc_entry("jiffies", example_dir);
- remove_proc_entry(MODULE_NAME, NULL);
-
- printk(KERN_INFO "%s %s removed\n",
- MODULE_NAME, MODULE_VERS);
-}
-
-
-module_init(init_procfs_example);
-module_exit(cleanup_procfs_example);
-
-MODULE_AUTHOR("Erik Mouw");
-MODULE_DESCRIPTION("procfs examples");
-MODULE_LICENSE("GPL");
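For reference, the removed guide and example documented the old read_proc/write_proc interface. The seq_file/proc_create() idiom that superseded it is covered in Documentation/filesystems/seq_file.txt; below is a minimal sketch of the equivalent of the deleted jiffies file using only the newer API (not part of this patch; the module and proc file names are illustrative).

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/jiffies.h>

static int jiffies_show(struct seq_file *m, void *v)
{
	seq_printf(m, "jiffies = %lu\n", jiffies);
	return 0;
}

static int jiffies_open(struct inode *inode, struct file *file)
{
	return single_open(file, jiffies_show, NULL);
}

static const struct file_operations jiffies_fops = {
	.owner   = THIS_MODULE,
	.open    = jiffies_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};

static int __init procfs_sketch_init(void)
{
	/* create /proc/procfs_example_jiffies, world-readable */
	if (!proc_create("procfs_example_jiffies", 0444, NULL, &jiffies_fops))
		return -ENOMEM;
	return 0;
}

static void __exit procfs_sketch_exit(void)
{
	remove_proc_entry("procfs_example_jiffies", NULL);
}

module_init(procfs_sketch_init);
module_exit(procfs_sketch_exit);
MODULE_LICENSE("GPL");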
diff --git a/Documentation/SubmitChecklist b/Documentation/SubmitChecklist
index 78a9168ff37..1053a56be3b 100644
--- a/Documentation/SubmitChecklist
+++ b/Documentation/SubmitChecklist
@@ -15,7 +15,7 @@ kernel patches.
2: Passes allnoconfig, allmodconfig
3: Builds on multiple CPU architectures by using local cross-compile tools
- or something like PLM at OSDL.
+ or some other build farm.
4: ppc64 is a good architecture for cross-compilation checking because it
tends to use `unsigned long' for 64-bit quantities.
@@ -88,3 +88,6 @@ kernel patches.
24: All memory barriers {e.g., barrier(), rmb(), wmb()} need a comment in the
source code that explains the logic of what they are doing and why.
+
+25: If any ioctls are added by the patch, then also update
+ Documentation/ioctl/ioctl-number.txt.
diff --git a/Documentation/fb/viafb.txt b/Documentation/fb/viafb.txt
index 67dbf442b0b..f3e046a6a98 100644
--- a/Documentation/fb/viafb.txt
+++ b/Documentation/fb/viafb.txt
@@ -7,7 +7,7 @@
VIA UniChrome Family(CLE266, PM800 / CN400 / CN300,
P4M800CE / P4M800Pro / CN700 / VN800,
CX700 / VX700, K8M890, P4M890,
- CN896 / P4M900, VX800)
+ CN896 / P4M900, VX800, VX855)
[Driver features]
------------------------
@@ -154,13 +154,6 @@
0 : No Dual Edge Panel (default)
1 : Dual Edge Panel
- viafb_video_dev:
- This option is used to specify video output devices(CRT, DVI, LCD) for
- duoview case.
- For example:
- To output video on DVI, we should use:
- modprobe viafb viafb_video_dev=DVI...
-
viafb_lcd_port:
This option is used to specify LCD output port,
available values are "DVP0" "DVP1" "DFP_HIGHLOW" "DFP_HIGH" "DFP_LOW".
@@ -181,9 +174,6 @@ Notes:
and bpp, need to call VIAFB specified ioctl interface VIAFB_SET_DEVICE
instead of calling common ioctl function FBIOPUT_VSCREENINFO since
viafb doesn't support multi-head well, or it will cause screen crush.
- 4. VX800 2D accelerator hasn't been supported in this driver yet. When
- using driver on VX800, the driver will disable the acceleration
- function as default.
[Configure viafb with "fbset" tool]
diff --git a/Documentation/filesystems/seq_file.txt b/Documentation/filesystems/seq_file.txt
index 0d15ebccf5b..a1e2e0dda90 100644
--- a/Documentation/filesystems/seq_file.txt
+++ b/Documentation/filesystems/seq_file.txt
@@ -248,9 +248,7 @@ code, that is done in the initialization code in the usual way:
{
struct proc_dir_entry *entry;
- entry = create_proc_entry("sequence", 0, NULL);
- if (entry)
- entry->proc_fops = &ct_file_ops;
+ proc_create("sequence", 0, NULL, &ct_file_ops);
return 0;
}
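Note that proc_create() returns NULL on failure, which is the condition the old "if (entry)" test was checking; a module that cannot tolerate a missing file should still check it. A sketch, reusing the ct_file_ops name from this document (the init function name here is illustrative):

static int ct_init(void)
{
	if (!proc_create("sequence", 0, NULL, &ct_file_ops))
		return -ENOMEM;
	return 0;
}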
diff --git a/Documentation/gpio.txt b/Documentation/gpio.txt
index e4e7daed2ba..1866c27eec6 100644
--- a/Documentation/gpio.txt
+++ b/Documentation/gpio.txt
@@ -531,6 +531,13 @@ and have the following read/write attributes:
This file exists only if the pin can be configured as an
interrupt generating input pin.
+ "active_low" ... reads as either 0 (false) or 1 (true). Write
+ any nonzero value to invert the value attribute both
+ for reading and writing. Existing and subsequent
+ poll(2) support configuration via the edge attribute
+ for "rising" and "falling" edges will follow this
+ setting.
+
GPIO controllers have paths like /sys/class/gpio/gpiochip42/ (for the
controller implementing GPIOs starting at #42) and have the following
read-only attributes:
@@ -566,6 +573,8 @@ requested using gpio_request():
int gpio_export_link(struct device *dev, const char *name,
unsigned gpio)
+ /* change the polarity of a GPIO node in sysfs */
+ int gpio_sysfs_set_active_low(unsigned gpio, int value);
After a kernel driver requests a GPIO, it may only be made available in
the sysfs interface by gpio_export(). The driver can control whether the
@@ -580,3 +589,9 @@ After the GPIO has been exported, gpio_export_link() allows creating
symlinks from elsewhere in sysfs to the GPIO sysfs node. Drivers can
use this to provide the interface under their own device in sysfs with
a descriptive name.
+
+Drivers can use gpio_sysfs_set_active_low() to hide GPIO line polarity
+differences between boards from user space. This only affects the
+sysfs interface. Polarity change can be done both before and after
+gpio_export(), and previously enabled poll(2) support for either
+rising or falling edge will be reconfigured to follow this setting.
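As a rough sketch of the kernel side (the function name, GPIO number, and error-handling policy are illustrative, not from this patch): a driver exporting a pushbutton that is wired active-low, so user space reads 1 when the button is pressed.

#include <linux/gpio.h>

static int example_export_button(struct device *dev, unsigned button_gpio)
{
	int err;

	err = gpio_request(button_gpio, "button");
	if (err)
		return err;

	err = gpio_direction_input(button_gpio);
	if (err)
		goto free;

	/* make it visible in /sys/class/gpio; direction may not change */
	err = gpio_export(button_gpio, false);
	if (err)
		goto free;

	/* hide the board-level inversion from user space */
	err = gpio_sysfs_set_active_low(button_gpio, 1);
	if (err)
		goto free;

	return gpio_export_link(dev, "button", button_gpio);

free:
	gpio_free(button_gpio);
	return err;
}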
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index ab95d3ada5c..c309515ae95 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -2729,6 +2729,11 @@ and is between 256 and 4096 characters. It is defined in the file
vmpoff= [KNL,S390] Perform z/VM CP command after power off.
Format: <command>
+ vt.cur_default= [VT] Default cursor shape.
+ Format: 0xCCBBAA, where AA, BB, and CC are the same as
+ the parameters of the <Esc>[?A;B;Cc escape sequence;
+ see VGA-softcursor.txt. Default: 2 = underline.
+
vt.default_blu= [VT]
Format: <blue0>,<blue1>,<blue2>,...,<blue15>
Change the default blue palette of the console.
diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
index 5c75c1b2352..9baae8afe8a 100644
--- a/arch/alpha/include/asm/elf.h
+++ b/arch/alpha/include/asm/elf.h
@@ -81,7 +81,6 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
#define ELF_DATA ELFDATA2LSB
#define ELF_ARCH EM_ALPHA
-#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 8192
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
index 6aac3f5bb2f..a399bb5730f 100644
--- a/arch/arm/include/asm/elf.h
+++ b/arch/arm/include/asm/elf.h
@@ -101,7 +101,6 @@ extern int arm_elf_read_implies_exec(const struct elf32_hdr *, int);
int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
#define ELF_CORE_COPY_TASK_REGS dump_task_regs
-#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
diff --git a/arch/arm/mach-davinci/board-da850-evm.c b/arch/arm/mach-davinci/board-da850-evm.c
index 62b98bffc15..07de8db1458 100644
--- a/arch/arm/mach-davinci/board-da850-evm.c
+++ b/arch/arm/mach-davinci/board-da850-evm.c
@@ -339,6 +339,15 @@ static struct davinci_mmc_config da850_mmc_config = {
.version = MMC_CTLR_VERSION_2,
};
+static void da850_panel_power_ctrl(int val)
+{
+ /* lcd backlight */
+ gpio_set_value(DA850_LCD_BL_PIN, val);
+
+ /* lcd power */
+ gpio_set_value(DA850_LCD_PWR_PIN, val);
+}
+
static int da850_lcd_hw_init(void)
{
int status;
@@ -356,17 +365,11 @@ static int da850_lcd_hw_init(void)
gpio_direction_output(DA850_LCD_BL_PIN, 0);
gpio_direction_output(DA850_LCD_PWR_PIN, 0);
- /* disable lcd backlight */
- gpio_set_value(DA850_LCD_BL_PIN, 0);
+ /* Switch off panel power and backlight */
+ da850_panel_power_ctrl(0);
- /* disable lcd power */
- gpio_set_value(DA850_LCD_PWR_PIN, 0);
-
- /* enable lcd power */
- gpio_set_value(DA850_LCD_PWR_PIN, 1);
-
- /* enable lcd backlight */
- gpio_set_value(DA850_LCD_BL_PIN, 1);
+ /* Switch on panel power and backlight */
+ da850_panel_power_ctrl(1);
return 0;
}
@@ -674,6 +677,7 @@ static __init void da850_evm_init(void)
pr_warning("da850_evm_init: lcd initialization failed: %d\n",
ret);
+ sharp_lk043t1dg01_pdata.panel_power_ctrl = da850_panel_power_ctrl,
ret = da8xx_register_lcdc(&sharp_lk043t1dg01_pdata);
if (ret)
pr_warning("da850_evm_init: lcdc registration failed: %d\n",
diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
index d5d1d41c600..3b3159b710d 100644
--- a/arch/avr32/include/asm/elf.h
+++ b/arch/avr32/include/asm/elf.h
@@ -77,7 +77,6 @@ typedef struct user_fpu_struct elf_fpregset_t;
#endif
#define ELF_ARCH EM_AVR32
-#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
diff --git a/arch/blackfin/include/asm/bfin-lq035q1.h b/arch/blackfin/include/asm/bfin-lq035q1.h
new file mode 100644
index 00000000000..57bc21ac229
--- /dev/null
+++ b/arch/blackfin/include/asm/bfin-lq035q1.h
@@ -0,0 +1,28 @@
+/*
+ * Blackfin LCD Framebuffer driver SHARP LQ035Q1DH02
+ *
+ * Copyright 2008-2009 Analog Devices Inc.
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef BFIN_LQ035Q1_H
+#define BFIN_LQ035Q1_H
+
+#define LQ035_RL (0 << 8) /* Right -> Left Scan */
+#define LQ035_LR (1 << 8) /* Left -> Right Scan */
+#define LQ035_TB (1 << 9) /* Top -> Bottom Scan */
+#define LQ035_BT (0 << 9) /* Bottom -> Top Scan */
+#define LQ035_BGR (1 << 11) /* Use BGR format */
+#define LQ035_RGB (0 << 11) /* Use RGB format */
+#define LQ035_NORM (1 << 13) /* Reversal */
+#define LQ035_REV (0 << 13) /* Reversal */
+
+struct bfin_lq035q1fb_disp_info {
+
+ unsigned mode;
+ /* GPIOs */
+ int use_bl;
+ unsigned gpio_bl;
+};
+
+#endif /* BFIN_LQ035Q1_H */
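A board file would consume this header roughly as follows (a sketch: the backlight GPIO number and the surrounding platform-device registration are assumptions, not part of this patch):

#include <asm/bfin-lq035q1.h>

static struct bfin_lq035q1fb_disp_info bfin_lq035q1_data = {
	/* left-to-right, top-to-bottom scan, RGB, non-reversed */
	.mode	 = LQ035_NORM | LQ035_RGB | LQ035_LR | LQ035_TB,
	/* drive the backlight from a GPIO (pin number made up here) */
	.use_bl	 = 1,
	.gpio_bl = 42,
};

The structure would then be handed to the framebuffer driver as platform_data of its platform device.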
diff --git a/arch/blackfin/include/asm/elf.h b/arch/blackfin/include/asm/elf.h
index 8e0764c81ea..5b50f0ecacf 100644
--- a/arch/blackfin/include/asm/elf.h
+++ b/arch/blackfin/include/asm/elf.h
@@ -55,7 +55,6 @@ do { \
_regs->p2 = _dynamic_addr; \
} while(0)
-#define USE_ELF_CORE_DUMP
#define ELF_FDPIC_CORE_EFLAGS EF_BFIN_FDPIC
#define ELF_EXEC_PAGESIZE 4096
diff --git a/arch/cris/include/asm/elf.h b/arch/cris/include/asm/elf.h
index 0f51b10b9f4..8a3d8e2b33c 100644
--- a/arch/cris/include/asm/elf.h
+++ b/arch/cris/include/asm/elf.h
@@ -64,8 +64,6 @@ typedef unsigned long elf_fpregset_t;
#define EF_CRIS_VARIANT_COMMON_V10_V32 0x00000004
/* End of excerpt from {binutils}/include/elf/cris.h. */
-#define USE_ELF_CORE_DUMP
-
#define ELF_EXEC_PAGESIZE 8192
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
diff --git a/arch/frv/include/asm/elf.h b/arch/frv/include/asm/elf.h
index 7bbf6e47f8c..c3819804a74 100644
--- a/arch/frv/include/asm/elf.h
+++ b/arch/frv/include/asm/elf.h
@@ -115,7 +115,6 @@ do { \
__kernel_frame0_ptr->gr29 = 0; \
} while(0)
-#define USE_ELF_CORE_DUMP
#define CORE_DUMP_USE_REGSET
#define ELF_FDPIC_CORE_EFLAGS EF_FRV_FDPIC
#define ELF_EXEC_PAGESIZE 16384
diff --git a/arch/h8300/include/asm/elf.h b/arch/h8300/include/asm/elf.h
index 94e2284c881..c24fa250d65 100644
--- a/arch/h8300/include/asm/elf.h
+++ b/arch/h8300/include/asm/elf.h
@@ -34,7 +34,6 @@ typedef unsigned long elf_fpregset_t;
#define ELF_PLAT_INIT(_r) _r->er1 = 0
-#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
diff --git a/arch/ia64/ia32/elfcore32.h b/arch/ia64/ia32/elfcore32.h
index 9a3abf58cea..65772574261 100644
--- a/arch/ia64/ia32/elfcore32.h
+++ b/arch/ia64/ia32/elfcore32.h
@@ -11,8 +11,6 @@
#include
#include
-#define USE_ELF_CORE_DUMP 1
-
/* Override elfcore.h */
#define _LINUX_ELFCORE_H 1
typedef unsigned int elf_greg_t;
diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
index 8d3c79cd81e..7d09a09cdaa 100644
--- a/arch/ia64/include/asm/dma-mapping.h
+++ b/arch/ia64/include/asm/dma-mapping.h
@@ -73,7 +73,7 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
if (!dev->dma_mask)
return 0;
- return addr + size <= *dev->dma_mask;
+ return addr + size - 1 <= *dev->dma_mask;
}
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
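The same one-line change recurs for powerpc and x86 below: the old comparison rejects a buffer whose last byte sits exactly at the device's addressing limit. A self-contained illustration with made-up numbers (userspace demo, not kernel code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t mask = 0xffffffffULL;  /* 32-bit capable device */
	uint64_t addr = 0xfffff000ULL;  /* last 4 KiB page below 4 GiB */
	uint64_t size = 0x1000;

	int old_ok = (addr + size) <= mask;      /* 0x100000000 <= 0xffffffff -> 0 */
	int new_ok = (addr + size - 1) <= mask;  /* 0xffffffff  <= 0xffffffff -> 1 */

	printf("old check: %d, new check: %d\n", old_ok, new_ok);
	return 0;
}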
diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
index 86eddee029c..e14108b19c0 100644
--- a/arch/ia64/include/asm/elf.h
+++ b/arch/ia64/include/asm/elf.h
@@ -25,7 +25,6 @@
#define ELF_DATA ELFDATA2LSB
#define ELF_ARCH EM_IA_64
-#define USE_ELF_CORE_DUMP
#define CORE_DUMP_USE_REGSET
/* Least-significant four bits of ELF header's e_flags are OS-specific. The bits are
diff --git a/arch/ia64/sn/pci/tioca_provider.c b/arch/ia64/sn/pci/tioca_provider.c
index 35b2a27d2e7..efb454534e5 100644
--- a/arch/ia64/sn/pci/tioca_provider.c
+++ b/arch/ia64/sn/pci/tioca_provider.c
@@ -9,6 +9,7 @@
#include
#include
#include
+#include
#include
#include
#include
@@ -369,7 +370,7 @@ tioca_dma_d48(struct pci_dev *pdev, u64 paddr)
static dma_addr_t
tioca_dma_mapped(struct pci_dev *pdev, unsigned long paddr, size_t req_size)
{
- int i, ps, ps_shift, entry, entries, mapsize, last_entry;
+ int ps, ps_shift, entry, entries, mapsize;
u64 xio_addr, end_xio_addr;
struct tioca_common *tioca_common;
struct tioca_kernel *tioca_kern;
@@ -410,23 +411,13 @@ tioca_dma_mapped(struct pci_dev *pdev, unsigned long paddr, size_t req_size)
map = tioca_kern->ca_pcigart_pagemap;
mapsize = tioca_kern->ca_pcigart_entries;
- entry = find_first_zero_bit(map, mapsize);
- while (entry < mapsize) {
- last_entry = find_next_bit(map, mapsize, entry);
-
- if (last_entry - entry >= entries)
- break;
-
- entry = find_next_zero_bit(map, mapsize, last_entry);
- }
-
- if (entry > mapsize) {
+ entry = bitmap_find_next_zero_area(map, mapsize, 0, entries, 0);
+ if (entry >= mapsize) {
kfree(ca_dmamap);
goto map_return;
}
- for (i = 0; i < entries; i++)
- set_bit(entry + i, map);
+ bitmap_set(map, entry, entries);
bus_addr = tioca_kern->ca_pciap_base + (entry * ps);
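Several drivers below repeat this conversion, replacing open-coded find/test/set loops with the bitmap library. The idiom, in sketch form (names are illustrative; locking is left to the caller, as in the converted drivers):

#include <linux/bitmap.h>

/* carve 'npages' contiguous entries out of a map of 'mapsize' bits */
static long map_alloc(unsigned long *map, unsigned long mapsize,
		      unsigned long npages)
{
	unsigned long entry;

	/* start = 0, align_mask = 0: no alignment constraint */
	entry = bitmap_find_next_zero_area(map, mapsize, 0, npages, 0);
	if (entry >= mapsize)
		return -1;		/* no hole big enough */

	bitmap_set(map, entry, npages);	/* claim the range */
	return entry;
}

static void map_free(unsigned long *map, unsigned long entry,
		     unsigned long npages)
{
	bitmap_clear(map, entry, npages);
}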
diff --git a/arch/m32r/include/asm/elf.h b/arch/m32r/include/asm/elf.h
index 0cc34c94bf2..2f85412ef73 100644
--- a/arch/m32r/include/asm/elf.h
+++ b/arch/m32r/include/asm/elf.h
@@ -102,7 +102,6 @@ typedef elf_fpreg_t elf_fpregset_t;
*/
#define ELF_PLAT_INIT(_r, load_addr) (_r)->r0 = 0
-#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE PAGE_SIZE
/*
diff --git a/arch/m68k/include/asm/elf.h b/arch/m68k/include/asm/elf.h
index 0b0f49eb876..01c193d9141 100644
--- a/arch/m68k/include/asm/elf.h
+++ b/arch/m68k/include/asm/elf.h
@@ -59,7 +59,6 @@ typedef struct user_m68kfp_struct elf_fpregset_t;
is actually used on ASV. */
#define ELF_PLAT_INIT(_r, load_addr) _r->a1 = 0
-#define USE_ELF_CORE_DUMP
#ifndef CONFIG_SUN3
#define ELF_EXEC_PAGESIZE 4096
#else
diff --git a/arch/microblaze/include/asm/elf.h b/arch/microblaze/include/asm/elf.h
index f92fc0dda00..7d4acf2b278 100644
--- a/arch/microblaze/include/asm/elf.h
+++ b/arch/microblaze/include/asm/elf.h
@@ -77,7 +77,6 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
#define ELF_DATA ELFDATA2MSB
#endif
-#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
index 7990694cda2..7a6a35dbe52 100644
--- a/arch/mips/include/asm/elf.h
+++ b/arch/mips/include/asm/elf.h
@@ -326,7 +326,6 @@ extern int dump_task_fpu(struct task_struct *, elf_fpregset_t *);
#define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) \
dump_task_fpu(tsk, elf_fpregs)
-#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE PAGE_SIZE
/* This yields a mask that user programs can use to figure out what
diff --git a/arch/mn10300/include/asm/elf.h b/arch/mn10300/include/asm/elf.h
index 75a70aa9fd6..e5fa97cd9a1 100644
--- a/arch/mn10300/include/asm/elf.h
+++ b/arch/mn10300/include/asm/elf.h
@@ -77,7 +77,6 @@ do { \
_ur->a1 = 0; _ur->a0 = 0; _ur->d1 = 0; _ur->d0 = 0; \
} while (0)
-#define USE_ELF_CORE_DUMP
#define CORE_DUMP_USE_REGSET
#define ELF_EXEC_PAGESIZE 4096
diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
index 9c802eb4be8..19f6cb1a4a1 100644
--- a/arch/parisc/include/asm/elf.h
+++ b/arch/parisc/include/asm/elf.h
@@ -328,7 +328,6 @@ struct pt_regs; /* forward declaration... */
such function. */
#define ELF_PLAT_INIT(_r, load_addr) _r->gr[23] = 0
-#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index e281daebddc..80a973bb9e7 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -197,7 +197,7 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
if (!dev->dma_mask)
return 0;
- return addr + size <= *dev->dma_mask;
+ return addr + size - 1 <= *dev->dma_mask;
}
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
index 014a624f4c8..17828ad411e 100644
--- a/arch/powerpc/include/asm/elf.h
+++ b/arch/powerpc/include/asm/elf.h
@@ -170,7 +170,6 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[ELF_NVSRHALFREG];
#define elf_check_arch(x) ((x)->e_machine == ELF_ARCH)
#define compat_elf_check_arch(x) ((x)->e_machine == EM_PPC)
-#define USE_ELF_CORE_DUMP
#define CORE_DUMP_USE_REGSET
#define ELF_EXEC_PAGESIZE PAGE_SIZE
diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
index 8c341490cfc..cbd759e3cd7 100644
--- a/arch/powerpc/include/asm/ptrace.h
+++ b/arch/powerpc/include/asm/ptrace.h
@@ -140,6 +140,8 @@ extern void user_enable_single_step(struct task_struct *);
extern void user_enable_block_step(struct task_struct *);
extern void user_disable_single_step(struct task_struct *);
+#define ARCH_HAS_USER_SINGLE_STEP_INFO
+
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index fd51578e29d..5547ae6e6b0 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -30,7 +30,7 @@
#include
#include
#include
-#include
+#include
#include
#include
#include
@@ -251,7 +251,7 @@ static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
}
ppc_md.tce_free(tbl, entry, npages);
- iommu_area_free(tbl->it_map, free_entry, npages);
+ bitmap_clear(tbl->it_map, free_entry, npages);
}
static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 804f0f30f22..d069ff8a7e0 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -174,6 +174,15 @@ int die(const char *str, struct pt_regs *regs, long err)
return 0;
}
+void user_single_step_siginfo(struct task_struct *tsk,
+ struct pt_regs *regs, siginfo_t *info)
+{
+ memset(info, 0, sizeof(*info));
+ info->si_signo = SIGTRAP;
+ info->si_code = TRAP_TRACE;
+ info->si_addr = (void __user *)regs->nip;
+}
+
void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
{
siginfo_t info;
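Defining ARCH_HAS_USER_SINGLE_STEP_INFO (see the ptrace.h hunk above) tells the generic ptrace code to use this richer, arch-provided siginfo instead of its minimal default. Roughly, the generic fallback being overridden looks like the sketch below (an approximation, not the verbatim include/linux/ptrace.h code):

#ifndef ARCH_HAS_USER_SINGLE_STEP_INFO
static inline void user_single_step_siginfo(struct task_struct *tsk,
					    struct pt_regs *regs,
					    siginfo_t *info)
{
	memset(info, 0, sizeof(*info));
	info->si_signo = SIGTRAP;
	/* no si_code/si_addr detail without the architecture's help */
}
#endif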
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index e885442c1df..354d42616c7 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -155,7 +155,6 @@ extern unsigned int vdso_enabled;
} while (0)
#define CORE_DUMP_USE_REGSET
-#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
diff --git a/arch/score/include/asm/elf.h b/arch/score/include/asm/elf.h
index 43526d9fda9..f478ce94181 100644
--- a/arch/score/include/asm/elf.h
+++ b/arch/score/include/asm/elf.h
@@ -61,7 +61,6 @@ struct task_struct;
struct pt_regs;
#define CORE_DUMP_USE_REGSET
-#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE PAGE_SIZE
/* This yields a mask that user programs can use to figure out what
diff --git a/arch/sh/include/asm/elf.h b/arch/sh/include/asm/elf.h
index ccb1d93bb04..ac04255022b 100644
--- a/arch/sh/include/asm/elf.h
+++ b/arch/sh/include/asm/elf.h
@@ -114,7 +114,6 @@ typedef struct user_fpu_struct elf_fpregset_t;
*/
#define CORE_DUMP_USE_REGSET
-#define USE_ELF_CORE_DUMP
#define ELF_FDPIC_CORE_EFLAGS EF_SH_FDPIC
#define ELF_EXEC_PAGESIZE PAGE_SIZE
diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
index 381a1b5256d..4269ca6ad18 100644
--- a/arch/sparc/include/asm/elf_32.h
+++ b/arch/sparc/include/asm/elf_32.h
@@ -104,8 +104,6 @@ typedef struct {
#define ELF_CLASS ELFCLASS32
#define ELF_DATA ELFDATA2MSB
-#define USE_ELF_CORE_DUMP
-
#define ELF_EXEC_PAGESIZE 4096
diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
index d42e393078c..ff66bb88537 100644
--- a/arch/sparc/include/asm/elf_64.h
+++ b/arch/sparc/include/asm/elf_64.h
@@ -152,7 +152,6 @@ typedef struct {
(x)->e_machine == EM_SPARC32PLUS)
#define compat_start_thread start_thread32
-#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE PAGE_SIZE
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c
index 7690cc219ec..5fad94950e7 100644
--- a/arch/sparc/kernel/iommu.c
+++ b/arch/sparc/kernel/iommu.c
@@ -11,6 +11,7 @@
#include
#include
#include
+#include
#ifdef CONFIG_PCI
#include
@@ -169,7 +170,7 @@ void iommu_range_free(struct iommu *iommu, dma_addr_t dma_addr, unsigned long np
entry = (dma_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
- iommu_area_free(arena->map, entry, npages);
+ bitmap_clear(arena->map, entry, npages);
}
int iommu_table_init(struct iommu *iommu, int tsbsize,
diff --git a/arch/sparc/kernel/ldc.c b/arch/sparc/kernel/ldc.c
index e0ba898e30c..df39a0f0d27 100644
--- a/arch/sparc/kernel/ldc.c
+++ b/arch/sparc/kernel/ldc.c
@@ -14,6 +14,7 @@
#include
#include
#include
+#include
#include
#include
@@ -1875,7 +1876,7 @@ EXPORT_SYMBOL(ldc_read);
static long arena_alloc(struct ldc_iommu *iommu, unsigned long npages)
{
struct iommu_arena *arena = &iommu->arena;
- unsigned long n, i, start, end, limit;
+ unsigned long n, start, end, limit;
int pass;
limit = arena->limit;
@@ -1883,7 +1884,7 @@ static long arena_alloc(struct ldc_iommu *iommu, unsigned long npages)
pass = 0;
again:
- n = find_next_zero_bit(arena->map, limit, start);
+ n = bitmap_find_next_zero_area(arena->map, limit, start, npages, 0);
end = n + npages;
if (unlikely(end >= limit)) {
if (likely(pass < 1)) {
@@ -1896,16 +1897,7 @@ again:
return -1;
}
}
-
- for (i = n; i < end; i++) {
- if (test_bit(i, arena->map)) {
- start = i + 1;
- goto again;
- }
- }
-
- for (i = n; i < end; i++)
- __set_bit(i, arena->map);
+ bitmap_set(arena->map, n, npages);
arena->hint = end;
diff --git a/arch/sparc/mm/sun4c.c b/arch/sparc/mm/sun4c.c
index 2ffacd67c42..a89baf0d875 100644
--- a/arch/sparc/mm/sun4c.c
+++ b/arch/sparc/mm/sun4c.c
@@ -17,6 +17,7 @@
#include
#include
#include
+#include
#include
#include
@@ -1021,20 +1022,12 @@ static char *sun4c_lockarea(char *vaddr, unsigned long size)
npages = (((unsigned long)vaddr & ~PAGE_MASK) +
size + (PAGE_SIZE-1)) >> PAGE_SHIFT;
- scan = 0;
local_irq_save(flags);
- for (;;) {
- scan = find_next_zero_bit(sun4c_iobuffer_map,
- iobuffer_map_size, scan);
- if ((base = scan) + npages > iobuffer_map_size) goto abend;
- for (;;) {
- if (scan >= base + npages) goto found;
- if (test_bit(scan, sun4c_iobuffer_map)) break;
- scan++;
- }
- }
+ base = bitmap_find_next_zero_area(sun4c_iobuffer_map, iobuffer_map_size,
+ 0, npages, 0);
+ if (base >= iobuffer_map_size)
+ goto abend;
-found:
high = ((base + npages) << PAGE_SHIFT) + sun4c_iobuffer_start;
high = SUN4C_REAL_PGDIR_ALIGN(high);
while (high > sun4c_iobuffer_high) {
diff --git a/arch/um/sys-i386/asm/elf.h b/arch/um/sys-i386/asm/elf.h
index d0da9d7c537..770885472ed 100644
--- a/arch/um/sys-i386/asm/elf.h
+++ b/arch/um/sys-i386/asm/elf.h
@@ -48,7 +48,6 @@ typedef struct user_i387_struct elf_fpregset_t;
PT_REGS_EAX(regs) = 0; \
} while (0)
-#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
diff --git a/arch/um/sys-ppc/asm/elf.h b/arch/um/sys-ppc/asm/elf.h
index af9463cd8ce..8aacaf56508 100644
--- a/arch/um/sys-ppc/asm/elf.h
+++ b/arch/um/sys-ppc/asm/elf.h
@@ -17,8 +17,6 @@ extern long elf_aux_hwcap;
#define ELF_CLASS ELFCLASS32
#endif
-#define USE_ELF_CORE_DUMP
-
#define R_386_NONE 0
#define R_386_32 1
#define R_386_PC32 2
diff --git a/arch/um/sys-x86_64/asm/elf.h b/arch/um/sys-x86_64/asm/elf.h
index 04b9e87c8da..49655c83efd 100644
--- a/arch/um/sys-x86_64/asm/elf.h
+++ b/arch/um/sys-x86_64/asm/elf.h
@@ -104,7 +104,6 @@ extern int elf_core_copy_fpregs(struct task_struct *t, elf_fpregset_t *fpu);
clear_thread_flag(TIF_IA32);
#endif
-#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
index 0f6c02f3b7d..ac91eed2106 100644
--- a/arch/x86/include/asm/dma-mapping.h
+++ b/arch/x86/include/asm/dma-mapping.h
@@ -67,7 +67,7 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
if (!dev->dma_mask)
return 0;
- return addr + size <= *dev->dma_mask;
+ return addr + size - 1 <= *dev->dma_mask;
}
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index 8a024babe5e..b4501ee223a 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -239,7 +239,6 @@ extern int force_personality32;
#endif /* !CONFIG_X86_32 */
#define CORE_DUMP_USE_REGSET
-#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
index 3d11fd0f44c..9d369f68032 100644
--- a/arch/x86/include/asm/ptrace.h
+++ b/arch/x86/include/asm/ptrace.h
@@ -292,6 +292,8 @@ extern void user_enable_block_step(struct task_struct *);
#define arch_has_block_step() (boot_cpu_data.x86 >= 6)
#endif
+#define ARCH_HAS_USER_SINGLE_STEP_INFO
+
struct user_desc;
extern int do_get_thread_area(struct task_struct *p, int idx,
struct user_desc __user *info);
diff --git a/arch/x86/include/asm/uv/bios.h b/arch/x86/include/asm/uv/bios.h
index 7ed17ff502b..2751f3075d8 100644
--- a/arch/x86/include/asm/uv/bios.h
+++ b/arch/x86/include/asm/uv/bios.h
@@ -76,15 +76,6 @@ union partition_info_u {
};
};
-union uv_watchlist_u {
- u64 val;
- struct {
- u64 blade : 16,
- size : 32,
- filler : 16;
- };
-};
-
enum uv_memprotect {
UV_MEMPROT_RESTRICT_ACCESS,
UV_MEMPROT_ALLOW_AMO,
@@ -100,7 +91,7 @@ extern s64 uv_bios_call_reentrant(enum uv_bios_cmd, u64, u64, u64, u64, u64);
extern s64 uv_bios_get_sn_info(int, int *, long *, long *, long *);
extern s64 uv_bios_freq_base(u64, u64 *);
-extern int uv_bios_mq_watchlist_alloc(int, unsigned long, unsigned int,
+extern int uv_bios_mq_watchlist_alloc(unsigned long, unsigned int,
unsigned long *);
extern int uv_bios_mq_watchlist_free(int, int);
extern s64 uv_bios_change_memprotect(u64, u64, enum uv_memprotect);
diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h
index d1414af9855..811bfabc80b 100644
--- a/arch/x86/include/asm/uv/uv_hub.h
+++ b/arch/x86/include/asm/uv/uv_hub.h
@@ -172,6 +172,8 @@ DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
#define UV_LOCAL_MMR_SIZE (64UL * 1024 * 1024)
#define UV_GLOBAL_MMR32_SIZE (64UL * 1024 * 1024)
+#define UV_GLOBAL_GRU_MMR_BASE 0x4000000
+
#define UV_GLOBAL_MMR32_PNODE_SHIFT 15
#define UV_GLOBAL_MMR64_PNODE_SHIFT 26
@@ -232,6 +234,26 @@ static inline unsigned long uv_gpa(void *v)
return uv_soc_phys_ram_to_gpa(__pa(v));
}
+/* Top two bits indicate the requested address is in MMR space. */
+static inline int
+uv_gpa_in_mmr_space(unsigned long gpa)
+{
+ return (gpa >> 62) == 0x3UL;
+}
+
+/* UV global physical address --> socket phys RAM */
+static inline unsigned long uv_gpa_to_soc_phys_ram(unsigned long gpa)
+{
+ unsigned long paddr = gpa & uv_hub_info->gpa_mask;
+ unsigned long remap_base = uv_hub_info->lowmem_remap_base;
+ unsigned long remap_top = uv_hub_info->lowmem_remap_top;
+
+ if (paddr >= remap_base && paddr < remap_base + remap_top)
+ paddr -= remap_base;
+ return paddr;
+}
+
+
/* gnode -> pnode */
static inline unsigned long uv_gpa_to_gnode(unsigned long gpa)
{
@@ -307,6 +329,15 @@ static inline unsigned long uv_read_global_mmr64(int pnode,
return readq(uv_global_mmr64_address(pnode, offset));
}
+/*
+ * Global MMR space addresses when referenced by the GRU. (GRU does
+ * NOT use socket addressing).
+ */
+static inline unsigned long uv_global_gru_mmr_address(int pnode, unsigned long offset)
+{
+ return UV_GLOBAL_GRU_MMR_BASE | offset | (pnode << uv_hub_info->m_val);
+}
+
/*
* Access hub local MMRs. Faster than using global space but only local MMRs
* are accessible.
@@ -434,6 +465,14 @@ static inline void uv_set_cpu_scir_bits(int cpu, unsigned char value)
}
}
+static unsigned long uv_hub_ipi_value(int apicid, int vector, int mode)
+{
+ return (1UL << UVH_IPI_INT_SEND_SHFT) |
+ ((apicid) << UVH_IPI_INT_APIC_ID_SHFT) |
+ (mode << UVH_IPI_INT_DELIVERY_MODE_SHFT) |
+ (vector << UVH_IPI_INT_VECTOR_SHFT);
+}
+
static inline void uv_hub_send_ipi(int pnode, int apicid, int vector)
{
unsigned long val;
@@ -442,10 +481,7 @@ static inline void uv_hub_send_ipi(int pnode, int apicid, int vector)
if (vector == NMI_VECTOR)
dmode = dest_NMI;
- val = (1UL << UVH_IPI_INT_SEND_SHFT) |
- ((apicid) << UVH_IPI_INT_APIC_ID_SHFT) |
- (dmode << UVH_IPI_INT_DELIVERY_MODE_SHFT) |
- (vector << UVH_IPI_INT_VECTOR_SHFT);
+ val = uv_hub_ipi_value(apicid, vector, dmode);
uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
}
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index b990b5cc954..23824fef789 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -19,7 +19,7 @@
#include
#include
-#include
+#include
#include
#include
#include
@@ -1162,7 +1162,7 @@ static void dma_ops_free_addresses(struct dma_ops_domain *dom,
address = (address % APERTURE_RANGE_SIZE) >> PAGE_SHIFT;
- iommu_area_free(range->bitmap, address, pages);
+ bitmap_clear(range->bitmap, address, pages);
}
diff --git a/arch/x86/kernel/bios_uv.c b/arch/x86/kernel/bios_uv.c
index 63a88e1f987..b0206a211b0 100644
--- a/arch/x86/kernel/bios_uv.c
+++ b/arch/x86/kernel/bios_uv.c
@@ -101,21 +101,17 @@ s64 uv_bios_get_sn_info(int fc, int *uvtype, long *partid, long *coher,
}
int
-uv_bios_mq_watchlist_alloc(int blade, unsigned long addr, unsigned int mq_size,
+uv_bios_mq_watchlist_alloc(unsigned long addr, unsigned int mq_size,
unsigned long *intr_mmr_offset)
{
- union uv_watchlist_u size_blade;
u64 watchlist;
s64 ret;
- size_blade.size = mq_size;
- size_blade.blade = blade;
-
/*
* bios returns watchlist number or negative error number.
*/
ret = (int)uv_bios_call_irqsave(UV_BIOS_WATCHLIST_ALLOC, addr,
- size_blade.val, (u64)intr_mmr_offset,
+ mq_size, (u64)intr_mmr_offset,
(u64)&watchlist, 0);
if (ret < BIOS_STATUS_SUCCESS)
return ret;
diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
index c563e4c8ff3..2bbde607814 100644
--- a/arch/x86/kernel/pci-calgary_64.c
+++ b/arch/x86/kernel/pci-calgary_64.c
@@ -31,7 +31,7 @@
#include
#include
#include
-#include
+#include
#include
#include
#include
@@ -212,7 +212,7 @@ static void iommu_range_reserve(struct iommu_table *tbl,
spin_lock_irqsave(&tbl->it_lock, flags);
- iommu_area_reserve(tbl->it_map, index, npages);
+ bitmap_set(tbl->it_map, index, npages);
spin_unlock_irqrestore(&tbl->it_lock, flags);
}
@@ -303,7 +303,7 @@ static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
spin_lock_irqsave(&tbl->it_lock, flags);
- iommu_area_free(tbl->it_map, entry, npages);
+ bitmap_clear(tbl->it_map, entry, npages);
spin_unlock_irqrestore(&tbl->it_lock, flags);
}
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index 56c0e730d3f..34de53b46f8 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -23,7 +23,7 @@
#include
#include
#include
-#include
+#include
#include
#include
#include
@@ -126,7 +126,7 @@ static void free_iommu(unsigned long offset, int size)
unsigned long flags;
spin_lock_irqsave(&iommu_bitmap_lock, flags);
- iommu_area_free(iommu_gart_bitmap, offset, size);
+ bitmap_clear(iommu_gart_bitmap, offset, size);
if (offset >= next_bit)
next_bit = offset + size;
spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
@@ -792,7 +792,7 @@ int __init gart_iommu_init(void)
* Out of IOMMU space handling.
* Reserve some invalid pages at the beginning of the GART.
*/
- iommu_area_reserve(iommu_gart_bitmap, 0, EMERGENCY_PAGES);
+ bitmap_set(iommu_gart_bitmap, 0, EMERGENCY_PAGES);
pr_info("PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
iommu_size >> 20);
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index 7079ddaf073..2779321046b 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -1676,21 +1676,33 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *task)
#endif
}
+static void fill_sigtrap_info(struct task_struct *tsk,
+ struct pt_regs *regs,
+ int error_code, int si_code,
+ struct siginfo *info)
+{
+ tsk->thread.trap_no = 1;
+ tsk->thread.error_code = error_code;
+
+ memset(info, 0, sizeof(*info));
+ info->si_signo = SIGTRAP;
+ info->si_code = si_code;
+ info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
+}
+
+void user_single_step_siginfo(struct task_struct *tsk,
+ struct pt_regs *regs,
+ struct siginfo *info)
+{
+ fill_sigtrap_info(tsk, regs, 0, TRAP_BRKPT, info);
+}
+
void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
int error_code, int si_code)
{
struct siginfo info;
- tsk->thread.trap_no = 1;
- tsk->thread.error_code = error_code;
-
- memset(&info, 0, sizeof(info));
- info.si_signo = SIGTRAP;
- info.si_code = si_code;
-
- /* User-mode ip? */
- info.si_addr = user_mode_vm(regs) ? (void __user *) regs->ip : NULL;
-
+ fill_sigtrap_info(tsk, regs, error_code, si_code, &info);
/* Send us the fake SIGTRAP */
force_sig_info(SIGTRAP, &info, tsk);
}
@@ -1755,29 +1767,22 @@ asmregparm long syscall_trace_enter(struct pt_regs *regs)
asmregparm void syscall_trace_leave(struct pt_regs *regs)
{
+ bool step;
+
if (unlikely(current->audit_context))
audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
trace_sys_exit(regs, regs->ax);
- if (test_thread_flag(TIF_SYSCALL_TRACE))
- tracehook_report_syscall_exit(regs, 0);
-
/*
* If TIF_SYSCALL_EMU is set, we only get here because of
* TIF_SINGLESTEP (i.e. this is PTRACE_SYSEMU_SINGLESTEP).
* We already reported this syscall instruction in
- * syscall_trace_enter(), so don't do any more now.
+ * syscall_trace_enter().
*/
- if (unlikely(test_thread_flag(TIF_SYSCALL_EMU)))
- return;
-
- /*
- * If we are single-stepping, synthesize a trap to follow the
- * system call instruction.
- */
- if (test_thread_flag(TIF_SINGLESTEP) &&
- tracehook_consider_fatal_signal(current, SIGTRAP))
- send_sigtrap(current, regs, 0, TRAP_BRKPT);
+ step = unlikely(test_thread_flag(TIF_SINGLESTEP)) &&
+ !test_thread_flag(TIF_SYSCALL_EMU);
+ if (step || test_thread_flag(TIF_SYSCALL_TRACE))
+ tracehook_report_syscall_exit(regs, step);
}
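With this change the single-step SIGTRAP is no longer sent by hand; tracehook_report_syscall_exit() synthesizes it when 'step' is set, using the user_single_step_siginfo() helper added above. Approximately (a sketch of the tracehook side, not the verbatim helper):

static inline void tracehook_report_syscall_exit(struct pt_regs *regs, int step)
{
	if (step) {
		siginfo_t info;

		user_single_step_siginfo(current, regs, &info);
		force_sig_info(SIGTRAP, &info, current);
		return;
	}

	ptrace_report_syscall(regs);
}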
diff --git a/arch/xtensa/include/asm/elf.h b/arch/xtensa/include/asm/elf.h
index c3f53e755ca..5eb6d695e98 100644
--- a/arch/xtensa/include/asm/elf.h
+++ b/arch/xtensa/include/asm/elf.h
@@ -123,7 +123,6 @@ extern void xtensa_elf_core_copy_regs (xtensa_gregset_t *, struct pt_regs *);
#define ELF_CLASS ELFCLASS32
#define ELF_ARCH EM_XTENSA
-#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE PAGE_SIZE
/*
diff --git a/drivers/char/efirtc.c b/drivers/char/efirtc.c
index 26a47dc88f6..53c524e7b82 100644
--- a/drivers/char/efirtc.c
+++ b/drivers/char/efirtc.c
@@ -285,6 +285,7 @@ static const struct file_operations efi_rtc_fops = {
.unlocked_ioctl = efi_rtc_ioctl,
.open = efi_rtc_open,
.release = efi_rtc_close,
+ .llseek = no_llseek,
};
static struct miscdevice efi_rtc_dev= {
diff --git a/drivers/char/ipmi/ipmi_kcs_sm.c b/drivers/char/ipmi/ipmi_kcs_sm.c
index 80704875794..cf82fedae09 100644
--- a/drivers/char/ipmi/ipmi_kcs_sm.c
+++ b/drivers/char/ipmi/ipmi_kcs_sm.c
@@ -370,7 +370,7 @@ static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time)
return SI_SM_IDLE;
case KCS_START_OP:
- if (state != KCS_IDLE) {
+ if (state != KCS_IDLE_STATE) {
start_error_recovery(kcs,
"State machine not idle at start");
break;
diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c
index 44203ff599d..1ae2de7d8b4 100644
--- a/drivers/char/sysrq.c
+++ b/drivers/char/sysrq.c
@@ -339,7 +339,7 @@ static struct sysrq_key_op sysrq_term_op = {
static void moom_callback(struct work_struct *ignored)
{
- out_of_memory(node_zonelist(0, GFP_KERNEL), GFP_KERNEL, 0);
+ out_of_memory(node_zonelist(0, GFP_KERNEL), GFP_KERNEL, 0, NULL);
}
static DECLARE_WORK(moom_work, moom_callback);
diff --git a/drivers/char/vt.c b/drivers/char/vt.c
index e43fbc66aef..50faa1fb0f0 100644
--- a/drivers/char/vt.c
+++ b/drivers/char/vt.c
@@ -164,6 +164,9 @@ module_param(default_utf8, int, S_IRUGO | S_IWUSR);
int global_cursor_default = -1;
module_param(global_cursor_default, int, S_IRUGO | S_IWUSR);
+static int cur_default = CUR_DEFAULT;
+module_param(cur_default, int, S_IRUGO | S_IWUSR);
+
/*
* ignore_poke: don't unblank the screen when things are typed. This is
* mainly for the privacy of braille terminal users.
@@ -1636,7 +1639,7 @@ static void reset_terminal(struct vc_data *vc, int do_clear)
/* do not do set_leds here because this causes an endless tasklet loop
when the keyboard hasn't been initialized yet */
- vc->vc_cursor_type = CUR_DEFAULT;
+ vc->vc_cursor_type = cur_default;
vc->vc_complement_mask = vc->vc_s_complement_mask;
default_attr(vc);
@@ -1838,7 +1841,7 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
if (vc->vc_par[0])
vc->vc_cursor_type = vc->vc_par[0] | (vc->vc_par[1] << 8) | (vc->vc_par[2] << 16);
else
- vc->vc_cursor_type = CUR_DEFAULT;
+ vc->vc_cursor_type = cur_default;
return;
}
break;
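The new vt.cur_default parameter (documented in the kernel-parameters.txt hunk above) is consumed here verbatim as the initial vc_cursor_type, i.e. the same A | B<<8 | C<<16 packing that do_con_trol() computes for the <Esc>[?A;B;Cc sequence. A small userspace illustration (the concrete A values come from VGA-softcursor.txt and are stated here as an assumption):

#include <stdio.h>

/* mirror of the vt.c composition: cursor_type = A | (B << 8) | (C << 16) */
static unsigned int cur_default_value(unsigned int a, unsigned int b,
				      unsigned int c)
{
	return a | (b << 8) | (c << 16);
}

int main(void)
{
	/* A=2 is the underline default; A=6 requests a full block cursor */
	printf("underline:  0x%06x\n", cur_default_value(2, 0, 0));
	printf("full block: 0x%06x\n", cur_default_value(6, 0, 0));
	return 0;
}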
diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
index 22db05a67bf..7785d8ffa40 100644
--- a/drivers/edac/i5100_edac.c
+++ b/drivers/edac/i5100_edac.c
@@ -9,6 +9,11 @@
* Intel 5100X Chipset Memory Controller Hub (MCH) - Datasheet
* http://download.intel.com/design/chipsets/datashts/318378.pdf
*
+ * The Intel 5100 has two independent channels. The EDAC core currently
+ * cannot reflect this configuration, so instead the chip-select
+ * rows for each respective channel are laid out one after another,
+ * the first half belonging to channel 0, the second half belonging
+ * to channel 1.
*/
#include
#include
@@ -25,6 +30,8 @@
/* device 16, func 1 */
#define I5100_MC 0x40 /* Memory Control Register */
+#define I5100_MC_SCRBEN_MASK (1 << 7)
+#define I5100_MC_SCRBDONE_MASK (1 << 4)
#define I5100_MS 0x44 /* Memory Status Register */
#define I5100_SPDDATA 0x48 /* Serial Presence Detect Status Reg */
#define I5100_SPDCMD 0x4c /* Serial Presence Detect Command Reg */
@@ -72,11 +79,21 @@
/* bit field accessors */
+static inline u32 i5100_mc_scrben(u32 mc)
+{
+ return mc >> 7 & 1;
+}
+
static inline u32 i5100_mc_errdeten(u32 mc)
{
return mc >> 5 & 1;
}
+static inline u32 i5100_mc_scrbdone(u32 mc)
+{
+ return mc >> 4 & 1;
+}
+
static inline u16 i5100_spddata_rdo(u16 a)
{
return a >> 15 & 1;
@@ -265,42 +282,43 @@ static inline u32 i5100_recmemb_ras(u32 a)
}
/* some generic limits */
-#define I5100_MAX_RANKS_PER_CTLR 6
-#define I5100_MAX_CTLRS 2
+#define I5100_MAX_RANKS_PER_CHAN 6
+#define I5100_CHANNELS 2
#define I5100_MAX_RANKS_PER_DIMM 4
#define I5100_DIMM_ADDR_LINES (6 - 3) /* 64 bits / 8 bits per byte */
-#define I5100_MAX_DIMM_SLOTS_PER_CTLR 4
+#define I5100_MAX_DIMM_SLOTS_PER_CHAN 4
#define I5100_MAX_RANK_INTERLEAVE 4
#define I5100_MAX_DMIRS 5
+#define I5100_SCRUB_REFRESH_RATE (5 * 60 * HZ)
struct i5100_priv {
/* ranks on each dimm -- 0 maps to not present -- obtained via SPD */
- int dimm_numrank[I5100_MAX_CTLRS][I5100_MAX_DIMM_SLOTS_PER_CTLR];
+ int dimm_numrank[I5100_CHANNELS][I5100_MAX_DIMM_SLOTS_PER_CHAN];
/*
* mainboard chip select map -- maps i5100 chip selects to
* DIMM slot chip selects. In the case of only 4 ranks per
- * controller, the mapping is fairly obvious but not unique.
- * we map -1 -> NC and assume both controllers use the same
+ * channel, the mapping is fairly obvious but not unique.
+ * we map -1 -> NC and assume both channels use the same
* map...
*
*/
- int dimm_csmap[I5100_MAX_DIMM_SLOTS_PER_CTLR][I5100_MAX_RANKS_PER_DIMM];
+ int dimm_csmap[I5100_MAX_DIMM_SLOTS_PER_CHAN][I5100_MAX_RANKS_PER_DIMM];
/* memory interleave range */
struct {
u64 limit;
unsigned way[2];
- } mir[I5100_MAX_CTLRS];
+ } mir[I5100_CHANNELS];
/* adjusted memory interleave range register */
- unsigned amir[I5100_MAX_CTLRS];
+ unsigned amir[I5100_CHANNELS];
/* dimm interleave range */
struct {
unsigned rank[I5100_MAX_RANK_INTERLEAVE];
u64 limit;
- } dmir[I5100_MAX_CTLRS][I5100_MAX_DMIRS];
+ } dmir[I5100_CHANNELS][I5100_MAX_DMIRS];
/* memory technology registers... */
struct {
@@ -310,30 +328,33 @@ struct i5100_priv {
unsigned numbank; /* 2 or 3 lines */
unsigned numrow; /* 13 .. 16 lines */
unsigned numcol; /* 11 .. 12 lines */
- } mtr[I5100_MAX_CTLRS][I5100_MAX_RANKS_PER_CTLR];
+ } mtr[I5100_CHANNELS][I5100_MAX_RANKS_PER_CHAN];
u64 tolm; /* top of low memory in bytes */
- unsigned ranksperctlr; /* number of ranks per controller */
+ unsigned ranksperchan; /* number of ranks per channel */
struct pci_dev *mc; /* device 16 func 1 */
struct pci_dev *ch0mm; /* device 21 func 0 */
struct pci_dev *ch1mm; /* device 22 func 0 */
+
+ struct delayed_work i5100_scrubbing;
+ int scrub_enable;
};
-/* map a rank/ctlr to a slot number on the mainboard */
+/* map a rank/chan to a slot number on the mainboard */
static int i5100_rank_to_slot(const struct mem_ctl_info *mci,
- int ctlr, int rank)
+ int chan, int rank)
{
const struct i5100_priv *priv = mci->pvt_info;
int i;
- for (i = 0; i < I5100_MAX_DIMM_SLOTS_PER_CTLR; i++) {
+ for (i = 0; i < I5100_MAX_DIMM_SLOTS_PER_CHAN; i++) {
int j;
- const int numrank = priv->dimm_numrank[ctlr][i];
+ const int numrank = priv->dimm_numrank[chan][i];
for (j = 0; j < numrank; j++)
if (priv->dimm_csmap[i][j] == rank)
- return i * 2 + ctlr;
+ return i * 2 + chan;
}
return -1;
@@ -374,32 +395,32 @@ static const char *i5100_err_msg(unsigned err)
return "none";
}
-/* convert csrow index into a rank (per controller -- 0..5) */
+/* convert csrow index into a rank (per channel -- 0..5) */
static int i5100_csrow_to_rank(const struct mem_ctl_info *mci, int csrow)
{
const struct i5100_priv *priv = mci->pvt_info;
- return csrow % priv->ranksperctlr;
+ return csrow % priv->ranksperchan;
}
-/* convert csrow index into a controller (0..1) */
-static int i5100_csrow_to_cntlr(const struct mem_ctl_info *mci, int csrow)
+/* convert csrow index into a channel (0..1) */
+static int i5100_csrow_to_chan(const struct mem_ctl_info *mci, int csrow)
{
const struct i5100_priv *priv = mci->pvt_info;
- return csrow / priv->ranksperctlr;
+ return csrow / priv->ranksperchan;
}
static unsigned i5100_rank_to_csrow(const struct mem_ctl_info *mci,
- int ctlr, int rank)
+ int chan, int rank)
{
const struct i5100_priv *priv = mci->pvt_info;
- return ctlr * priv->ranksperctlr + rank;
+ return chan * priv->ranksperchan + rank;
}
static void i5100_handle_ce(struct mem_ctl_info *mci,
- int ctlr,
+ int chan,
unsigned bank,
unsigned rank,
unsigned long syndrome,
@@ -407,12 +428,12 @@ static void i5100_handle_ce(struct mem_ctl_info *mci,
unsigned ras,
const char *msg)
{
- const int csrow = i5100_rank_to_csrow(mci, ctlr, rank);
+ const int csrow = i5100_rank_to_csrow(mci, chan, rank);
printk(KERN_ERR
- "CE ctlr %d, bank %u, rank %u, syndrome 0x%lx, "
+ "CE chan %d, bank %u, rank %u, syndrome 0x%lx, "
"cas %u, ras %u, csrow %u, label \"%s\": %s\n",
- ctlr, bank, rank, syndrome, cas, ras,
+ chan, bank, rank, syndrome, cas, ras,
csrow, mci->csrows[csrow].channels[0].label, msg);
mci->ce_count++;
@@ -421,7 +442,7 @@ static void i5100_handle_ce(struct mem_ctl_info *mci,
}
static void i5100_handle_ue(struct mem_ctl_info *mci,
- int ctlr,
+ int chan,
unsigned bank,
unsigned rank,
unsigned long syndrome,
@@ -429,23 +450,23 @@ static void i5100_handle_ue(struct mem_ctl_info *mci,
unsigned ras,
const char *msg)
{
- const int csrow = i5100_rank_to_csrow(mci, ctlr, rank);
+ const int csrow = i5100_rank_to_csrow(mci, chan, rank);
printk(KERN_ERR
- "UE ctlr %d, bank %u, rank %u, syndrome 0x%lx, "
+ "UE chan %d, bank %u, rank %u, syndrome 0x%lx, "
"cas %u, ras %u, csrow %u, label \"%s\": %s\n",
- ctlr, bank, rank, syndrome, cas, ras,
+ chan, bank, rank, syndrome, cas, ras,
csrow, mci->csrows[csrow].channels[0].label, msg);
mci->ue_count++;
mci->csrows[csrow].ue_count++;
}
-static void i5100_read_log(struct mem_ctl_info *mci, int ctlr,
+static void i5100_read_log(struct mem_ctl_info *mci, int chan,
u32 ferr, u32 nerr)
{
struct i5100_priv *priv = mci->pvt_info;
- struct pci_dev *pdev = (ctlr) ? priv->ch1mm : priv->ch0mm;
+ struct pci_dev *pdev = (chan) ? priv->ch1mm : priv->ch0mm;
u32 dw;
u32 dw2;
unsigned syndrome = 0;
@@ -484,7 +505,7 @@ static void i5100_read_log(struct mem_ctl_info *mci, int ctlr,
else
msg = i5100_err_msg(nerr);
- i5100_handle_ce(mci, ctlr, bank, rank, syndrome, cas, ras, msg);
+ i5100_handle_ce(mci, chan, bank, rank, syndrome, cas, ras, msg);
}
if (i5100_validlog_nrecmemvalid(dw)) {
@@ -506,7 +527,7 @@ static void i5100_read_log(struct mem_ctl_info *mci, int ctlr,
else
msg = i5100_err_msg(nerr);
- i5100_handle_ue(mci, ctlr, bank, rank, syndrome, cas, ras, msg);
+ i5100_handle_ue(mci, chan, bank, rank, syndrome, cas, ras, msg);
}
pci_write_config_dword(pdev, I5100_VALIDLOG, dw);
@@ -534,6 +555,80 @@ static void i5100_check_error(struct mem_ctl_info *mci)
}
}
+/* The i5100 chipset scrubs the entire memory once and then
+ * sets a done bit. Continuous scrubbing is achieved by enqueuing
+ * delayed work on a workqueue that checks every few minutes whether
+ * the scrub has completed and, if so, re-initiates it.
+ */
+
+static void i5100_refresh_scrubbing(struct work_struct *work)
+{
+ struct delayed_work *i5100_scrubbing = container_of(work,
+ struct delayed_work,
+ work);
+ struct i5100_priv *priv = container_of(i5100_scrubbing,
+ struct i5100_priv,
+ i5100_scrubbing);
+ u32 dw;
+
+ pci_read_config_dword(priv->mc, I5100_MC, &dw);
+
+ if (priv->scrub_enable) {
+
+ pci_read_config_dword(priv->mc, I5100_MC, &dw);
+
+ if (i5100_mc_scrbdone(dw)) {
+ dw |= I5100_MC_SCRBEN_MASK;
+ pci_write_config_dword(priv->mc, I5100_MC, dw);
+ pci_read_config_dword(priv->mc, I5100_MC, &dw);
+ }
+
+ schedule_delayed_work(&(priv->i5100_scrubbing),
+ I5100_SCRUB_REFRESH_RATE);
+ }
+}
+/*
+ * The reported bandwidth is based on experimentation; feel free to refine it.
+ */
+static int i5100_set_scrub_rate(struct mem_ctl_info *mci,
+ u32 *bandwidth)
+{
+ struct i5100_priv *priv = mci->pvt_info;
+ u32 dw;
+
+ pci_read_config_dword(priv->mc, I5100_MC, &dw);
+ if (*bandwidth) {
+ priv->scrub_enable = 1;
+ dw |= I5100_MC_SCRBEN_MASK;
+ schedule_delayed_work(&(priv->i5100_scrubbing),
+ I5100_SCRUB_REFRESH_RATE);
+ } else {
+ priv->scrub_enable = 0;
+ dw &= ~I5100_MC_SCRBEN_MASK;
+ cancel_delayed_work(&(priv->i5100_scrubbing));
+ }
+ pci_write_config_dword(priv->mc, I5100_MC, dw);
+
+ pci_read_config_dword(priv->mc, I5100_MC, &dw);
+
+ *bandwidth = 5900000 * i5100_mc_scrben(dw);
+
+ return 0;
+}
+
+static int i5100_get_scrub_rate(struct mem_ctl_info *mci,
+ u32 *bandwidth)
+{
+ struct i5100_priv *priv = mci->pvt_info;
+ u32 dw;
+
+ pci_read_config_dword(priv->mc, I5100_MC, &dw);
+
+ *bandwidth = 5900000 * i5100_mc_scrben(dw);
+
+ return 0;
+}
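
For orientation, a minimal sketch (illustration only, assuming an mci instance created by this driver; the callbacks are wired into mci in i5100_init_one() below) of how the two scrub-rate callbacks behave from a caller's point of view:

/* illustration only -- not part of the patch */
u32 bw = 1;			/* any non-zero value requests scrubbing */
i5100_set_scrub_rate(mci, &bw);	/* sets SCRBEN, schedules the delayed work;
				 * bw is rewritten to 5900000 when enabled */
i5100_get_scrub_rate(mci, &bw);	/* 5900000 while SCRBEN is set, 0 otherwise */
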
+
static struct pci_dev *pci_get_device_func(unsigned vendor,
unsigned device,
unsigned func)
@@ -557,19 +652,19 @@ static unsigned long __devinit i5100_npages(struct mem_ctl_info *mci,
int csrow)
{
struct i5100_priv *priv = mci->pvt_info;
- const unsigned ctlr_rank = i5100_csrow_to_rank(mci, csrow);
- const unsigned ctlr = i5100_csrow_to_cntlr(mci, csrow);
+ const unsigned chan_rank = i5100_csrow_to_rank(mci, csrow);
+ const unsigned chan = i5100_csrow_to_chan(mci, csrow);
unsigned addr_lines;
/* dimm present? */
- if (!priv->mtr[ctlr][ctlr_rank].present)
+ if (!priv->mtr[chan][chan_rank].present)
return 0ULL;
addr_lines =
I5100_DIMM_ADDR_LINES +
- priv->mtr[ctlr][ctlr_rank].numcol +
- priv->mtr[ctlr][ctlr_rank].numrow +
- priv->mtr[ctlr][ctlr_rank].numbank;
+ priv->mtr[chan][chan_rank].numcol +
+ priv->mtr[chan][chan_rank].numrow +
+ priv->mtr[chan][chan_rank].numbank;
return (unsigned long)
((unsigned long long) (1ULL << addr_lines) / PAGE_SIZE);
@@ -581,11 +676,11 @@ static void __devinit i5100_init_mtr(struct mem_ctl_info *mci)
struct pci_dev *mms[2] = { priv->ch0mm, priv->ch1mm };
int i;
- for (i = 0; i < I5100_MAX_CTLRS; i++) {
+ for (i = 0; i < I5100_CHANNELS; i++) {
int j;
struct pci_dev *pdev = mms[i];
- for (j = 0; j < I5100_MAX_RANKS_PER_CTLR; j++) {
+ for (j = 0; j < I5100_MAX_RANKS_PER_CHAN; j++) {
const unsigned addr =
(j < 4) ? I5100_MTR_0 + j * 2 :
I5100_MTR_4 + (j - 4) * 2;
@@ -644,7 +739,6 @@ static int i5100_read_spd_byte(const struct mem_ctl_info *mci,
* fill dimm chip select map
*
* FIXME:
- * o only valid for 4 ranks per controller
 * o not the only way to map chip selects to dimm slots
* o investigate if there is some way to obtain this map from the bios
*/
@@ -653,9 +747,7 @@ static void __devinit i5100_init_dimm_csmap(struct mem_ctl_info *mci)
struct i5100_priv *priv = mci->pvt_info;
int i;
- WARN_ON(priv->ranksperctlr != 4);
-
- for (i = 0; i < I5100_MAX_DIMM_SLOTS_PER_CTLR; i++) {
+ for (i = 0; i < I5100_MAX_DIMM_SLOTS_PER_CHAN; i++) {
int j;
for (j = 0; j < I5100_MAX_RANKS_PER_DIMM; j++)
@@ -663,12 +755,21 @@ static void __devinit i5100_init_dimm_csmap(struct mem_ctl_info *mci)
}
/* only 2 chip selects per slot... */
- priv->dimm_csmap[0][0] = 0;
- priv->dimm_csmap[0][1] = 3;
- priv->dimm_csmap[1][0] = 1;
- priv->dimm_csmap[1][1] = 2;
- priv->dimm_csmap[2][0] = 2;
- priv->dimm_csmap[3][0] = 3;
+ if (priv->ranksperchan == 4) {
+ priv->dimm_csmap[0][0] = 0;
+ priv->dimm_csmap[0][1] = 3;
+ priv->dimm_csmap[1][0] = 1;
+ priv->dimm_csmap[1][1] = 2;
+ priv->dimm_csmap[2][0] = 2;
+ priv->dimm_csmap[3][0] = 3;
+ } else {
+ priv->dimm_csmap[0][0] = 0;
+ priv->dimm_csmap[0][1] = 1;
+ priv->dimm_csmap[1][0] = 2;
+ priv->dimm_csmap[1][1] = 3;
+ priv->dimm_csmap[2][0] = 4;
+ priv->dimm_csmap[2][1] = 5;
+ }
}
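
As a worked example (illustration only), the chip-select maps produced by the two branches above, showing the two chip selects actually used per DIMM slot:

/* illustration only: dimm_csmap contents after i5100_init_dimm_csmap() */
static const int example_csmap_4rank[][2] = {
	{ 0, 3 },	/* slot 0 */
	{ 1, 2 },	/* slot 1 */
	{ 2, -1 },	/* slot 2: second chip select unused (-1 == NC) */
	{ 3, -1 },	/* slot 3 */
};

static const int example_csmap_6rank[][2] = {
	{ 0, 1 },	/* slot 0 */
	{ 2, 3 },	/* slot 1 */
	{ 4, 5 },	/* slot 2 */
};
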
static void __devinit i5100_init_dimm_layout(struct pci_dev *pdev,
@@ -677,10 +778,10 @@ static void __devinit i5100_init_dimm_layout(struct pci_dev *pdev,
struct i5100_priv *priv = mci->pvt_info;
int i;
- for (i = 0; i < I5100_MAX_CTLRS; i++) {
+ for (i = 0; i < I5100_CHANNELS; i++) {
int j;
- for (j = 0; j < I5100_MAX_DIMM_SLOTS_PER_CTLR; j++) {
+ for (j = 0; j < I5100_MAX_DIMM_SLOTS_PER_CHAN; j++) {
u8 rank;
if (i5100_read_spd_byte(mci, i, j, 5, &rank) < 0)
@@ -720,7 +821,7 @@ static void __devinit i5100_init_interleaving(struct pci_dev *pdev,
pci_read_config_word(pdev, I5100_AMIR_1, &w);
priv->amir[1] = w;
- for (i = 0; i < I5100_MAX_CTLRS; i++) {
+ for (i = 0; i < I5100_CHANNELS; i++) {
int j;
for (j = 0; j < 5; j++) {
@@ -747,7 +848,7 @@ static void __devinit i5100_init_csrows(struct mem_ctl_info *mci)
for (i = 0; i < mci->nr_csrows; i++) {
const unsigned long npages = i5100_npages(mci, i);
- const unsigned cntlr = i5100_csrow_to_cntlr(mci, i);
+ const unsigned chan = i5100_csrow_to_chan(mci, i);
const unsigned rank = i5100_csrow_to_rank(mci, i);
if (!npages)
@@ -765,7 +866,7 @@ static void __devinit i5100_init_csrows(struct mem_ctl_info *mci)
mci->csrows[i].grain = 32;
mci->csrows[i].csrow_idx = i;
mci->csrows[i].dtype =
- (priv->mtr[cntlr][rank].width == 4) ? DEV_X4 : DEV_X8;
+ (priv->mtr[chan][rank].width == 4) ? DEV_X4 : DEV_X8;
mci->csrows[i].ue_count = 0;
mci->csrows[i].ce_count = 0;
mci->csrows[i].mtype = MEM_RDDR2;
@@ -777,7 +878,7 @@ static void __devinit i5100_init_csrows(struct mem_ctl_info *mci)
mci->csrows[i].channels[0].csrow = mci->csrows + i;
snprintf(mci->csrows[i].channels[0].label,
sizeof(mci->csrows[i].channels[0].label),
- "DIMM%u", i5100_rank_to_slot(mci, cntlr, rank));
+ "DIMM%u", i5100_rank_to_slot(mci, chan, rank));
total_pages += npages;
}
@@ -815,13 +916,6 @@ static int __devinit i5100_init_one(struct pci_dev *pdev,
pci_read_config_dword(pdev, I5100_MS, &dw);
ranksperch = !!(dw & (1 << 8)) * 2 + 4;
- if (ranksperch != 4) {
- /* FIXME: get 6 ranks / controller to work - need hw... */
- printk(KERN_INFO "i5100_edac: unsupported configuration.\n");
- ret = -ENODEV;
- goto bail_pdev;
- }
-
/* enable error reporting... */
pci_read_config_dword(pdev, I5100_EMASK_MEM, &dw);
dw &= ~I5100_FERR_NF_MEM_ANY_MASK;
@@ -864,11 +958,21 @@ static int __devinit i5100_init_one(struct pci_dev *pdev,
mci->dev = &pdev->dev;
priv = mci->pvt_info;
- priv->ranksperctlr = ranksperch;
+ priv->ranksperchan = ranksperch;
priv->mc = pdev;
priv->ch0mm = ch0mm;
priv->ch1mm = ch1mm;
+ INIT_DELAYED_WORK(&(priv->i5100_scrubbing), i5100_refresh_scrubbing);
+
+ /* If scrubbing was already enabled by the BIOS, start maintaining it */
+ pci_read_config_dword(pdev, I5100_MC, &dw);
+ if (i5100_mc_scrben(dw)) {
+ priv->scrub_enable = 1;
+ schedule_delayed_work(&(priv->i5100_scrubbing),
+ I5100_SCRUB_REFRESH_RATE);
+ }
+
i5100_init_dimm_layout(pdev, mci);
i5100_init_interleaving(pdev, mci);
@@ -882,6 +986,8 @@ static int __devinit i5100_init_one(struct pci_dev *pdev,
mci->ctl_page_to_phys = NULL;
mci->edac_check = i5100_check_error;
+ mci->set_sdram_scrub_rate = i5100_set_scrub_rate;
+ mci->get_sdram_scrub_rate = i5100_get_scrub_rate;
i5100_init_csrows(mci);
@@ -897,12 +1003,14 @@ static int __devinit i5100_init_one(struct pci_dev *pdev,
if (edac_mc_add_mc(mci)) {
ret = -ENODEV;
- goto bail_mc;
+ goto bail_scrub;
}
return ret;
-bail_mc:
+bail_scrub:
+ priv->scrub_enable = 0;
+ cancel_delayed_work_sync(&(priv->i5100_scrubbing));
edac_mc_free(mci);
bail_disable_ch1:
@@ -935,6 +1043,10 @@ static void __devexit i5100_remove_one(struct pci_dev *pdev)
return;
priv = mci->pvt_info;
+
+ priv->scrub_enable = 0;
+ cancel_delayed_work_sync(&(priv->i5100_scrubbing));
+
pci_disable_device(pdev);
pci_disable_device(priv->ch0mm);
pci_disable_device(priv->ch1mm);
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 57ca339924e..a019b49ecc9 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -206,6 +206,12 @@ config GPIO_LANGWELL
help
Say Y here to support Intel Moorestown platform GPIO.
+config GPIO_TIMBERDALE
+ bool "Support for timberdale GPIO IP"
+ depends on MFD_TIMBERDALE && GPIOLIB && HAS_IOMEM
+ ---help---
+ Add support for the GPIO IP in the timberdale FPGA.
+
comment "SPI GPIO expanders:"
config GPIO_MAX7301
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 270b6d7839f..52fe4cf734c 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_GPIO_MCP23S08) += mcp23s08.o
obj-$(CONFIG_GPIO_PCA953X) += pca953x.o
obj-$(CONFIG_GPIO_PCF857X) += pcf857x.o
obj-$(CONFIG_GPIO_PL061) += pl061.o
+obj-$(CONFIG_GPIO_TIMBERDALE) += timbgpio.o
obj-$(CONFIG_GPIO_TWL4030) += twl4030-gpio.o
obj-$(CONFIG_GPIO_UCB1400) += ucb1400_gpio.o
obj-$(CONFIG_GPIO_XILINX) += xilinx_gpio.o
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 50de0f5750d..a25ad284a27 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -53,6 +53,7 @@ struct gpio_desc {
#define FLAG_SYSFS 4 /* exported via /sys/class/gpio/control */
#define FLAG_TRIG_FALL 5 /* trigger on falling edge */
#define FLAG_TRIG_RISE 6 /* trigger on rising edge */
+#define FLAG_ACTIVE_LOW 7 /* sysfs value has active low */
#define PDESC_ID_SHIFT 16 /* add new flags before this one */
@@ -210,6 +211,11 @@ static DEFINE_MUTEX(sysfs_lock);
* * configures behavior of poll(2) on /value
* * available only if pin can generate IRQs on input
* * is read/write as "none", "falling", "rising", or "both"
+ * /active_low
+ * * configures polarity of /value
+ * * is read/write as zero/nonzero
+ * * also affects existing and subsequent "falling" and "rising"
+ * /edge configuration
*/
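
For concreteness (editorial sketch, not part of the patch), the polarity handling that the new /active_low attribute adds boils down to:

/* illustration only: with active_low set, a physical high level reads as 0 */
int raw  = gpio_get_value_cansleep(gpio);	/* physical line level */
int show = test_bit(FLAG_ACTIVE_LOW, &desc->flags) ? !raw : !!raw;
/* writes through /value are inverted the same way, and a requested
 * "falling" edge is armed as a rising hardware edge (and vice versa) */
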
static ssize_t gpio_direction_show(struct device *dev,
@@ -255,7 +261,7 @@ static ssize_t gpio_direction_store(struct device *dev,
return status ? : size;
}
-static const DEVICE_ATTR(direction, 0644,
+static /* const */ DEVICE_ATTR(direction, 0644,
gpio_direction_show, gpio_direction_store);
static ssize_t gpio_value_show(struct device *dev,
@@ -267,10 +273,17 @@ static ssize_t gpio_value_show(struct device *dev,
mutex_lock(&sysfs_lock);
- if (!test_bit(FLAG_EXPORT, &desc->flags))
+ if (!test_bit(FLAG_EXPORT, &desc->flags)) {
status = -EIO;
- else
- status = sprintf(buf, "%d\n", !!gpio_get_value_cansleep(gpio));
+ } else {
+ int value;
+
+ value = !!gpio_get_value_cansleep(gpio);
+ if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
+ value = !value;
+
+ status = sprintf(buf, "%d\n", value);
+ }
mutex_unlock(&sysfs_lock);
return status;
@@ -294,6 +307,8 @@ static ssize_t gpio_value_store(struct device *dev,
status = strict_strtol(buf, 0, &value);
if (status == 0) {
+ if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
+ value = !value;
gpio_set_value_cansleep(gpio, value != 0);
status = size;
}
@@ -303,7 +318,7 @@ static ssize_t gpio_value_store(struct device *dev,
return status;
}
-static /*const*/ DEVICE_ATTR(value, 0644,
+static const DEVICE_ATTR(value, 0644,
gpio_value_show, gpio_value_store);
static irqreturn_t gpio_sysfs_irq(int irq, void *priv)
@@ -352,9 +367,11 @@ static int gpio_setup_irq(struct gpio_desc *desc, struct device *dev,
irq_flags = IRQF_SHARED;
if (test_bit(FLAG_TRIG_FALL, &gpio_flags))
- irq_flags |= IRQF_TRIGGER_FALLING;
+ irq_flags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
+ IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
if (test_bit(FLAG_TRIG_RISE, &gpio_flags))
- irq_flags |= IRQF_TRIGGER_RISING;
+ irq_flags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
+ IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING;
if (!pdesc) {
pdesc = kmalloc(sizeof(*pdesc), GFP_KERNEL);
@@ -475,9 +492,79 @@ found:
static DEVICE_ATTR(edge, 0644, gpio_edge_show, gpio_edge_store);
+static int sysfs_set_active_low(struct gpio_desc *desc, struct device *dev,
+ int value)
+{
+ int status = 0;
+
+ if (!!test_bit(FLAG_ACTIVE_LOW, &desc->flags) == !!value)
+ return 0;
+
+ if (value)
+ set_bit(FLAG_ACTIVE_LOW, &desc->flags);
+ else
+ clear_bit(FLAG_ACTIVE_LOW, &desc->flags);
+
+ /* reconfigure poll(2) support if enabled on one edge only */
+ if (dev != NULL && (!!test_bit(FLAG_TRIG_RISE, &desc->flags) ^
+ !!test_bit(FLAG_TRIG_FALL, &desc->flags))) {
+ unsigned long trigger_flags = desc->flags & GPIO_TRIGGER_MASK;
+
+ gpio_setup_irq(desc, dev, 0);
+ status = gpio_setup_irq(desc, dev, trigger_flags);
+ }
+
+ return status;
+}
+
+static ssize_t gpio_active_low_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ const struct gpio_desc *desc = dev_get_drvdata(dev);
+ ssize_t status;
+
+ mutex_lock(&sysfs_lock);
+
+ if (!test_bit(FLAG_EXPORT, &desc->flags))
+ status = -EIO;
+ else
+ status = sprintf(buf, "%d\n",
+ !!test_bit(FLAG_ACTIVE_LOW, &desc->flags));
+
+ mutex_unlock(&sysfs_lock);
+
+ return status;
+}
+
+static ssize_t gpio_active_low_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct gpio_desc *desc = dev_get_drvdata(dev);
+ ssize_t status;
+
+ mutex_lock(&sysfs_lock);
+
+ if (!test_bit(FLAG_EXPORT, &desc->flags)) {
+ status = -EIO;
+ } else {
+ long value;
+
+ status = strict_strtol(buf, 0, &value);
+ if (status == 0)
+ status = sysfs_set_active_low(desc, dev, value != 0);
+ }
+
+ mutex_unlock(&sysfs_lock);
+
+ return status ? : size;
+}
+
+static const DEVICE_ATTR(active_low, 0644,
+ gpio_active_low_show, gpio_active_low_store);
+
static const struct attribute *gpio_attrs[] = {
- &dev_attr_direction.attr,
&dev_attr_value.attr,
+ &dev_attr_active_low.attr,
NULL,
};
@@ -662,12 +749,12 @@ int gpio_export(unsigned gpio, bool direction_may_change)
dev = device_create(&gpio_class, desc->chip->dev, MKDEV(0, 0),
desc, ioname ? ioname : "gpio%d", gpio);
if (!IS_ERR(dev)) {
- if (direction_may_change)
- status = sysfs_create_group(&dev->kobj,
+ status = sysfs_create_group(&dev->kobj,
&gpio_attr_group);
- else
+
+ if (!status && direction_may_change)
status = device_create_file(dev,
- &dev_attr_value);
+ &dev_attr_direction);
if (!status && gpio_to_irq(gpio) >= 0
&& (direction_may_change
@@ -744,6 +831,55 @@ done:
}
EXPORT_SYMBOL_GPL(gpio_export_link);
+
+/**
+ * gpio_sysfs_set_active_low - set the polarity of gpio sysfs value
+ * @gpio: gpio to change
+ * @value: non-zero to use active low, i.e. inverted values
+ *
+ * Set the polarity of /sys/class/gpio/gpioN/value sysfs attribute.
+ * The GPIO does not have to be exported yet. If poll(2) support has
+ * been enabled for either rising or falling edge, it will be
+ * reconfigured to follow the new polarity.
+ *
+ * Returns zero on success, else an error.
+ */
+int gpio_sysfs_set_active_low(unsigned gpio, int value)
+{
+ struct gpio_desc *desc;
+ struct device *dev = NULL;
+ int status = -EINVAL;
+
+ if (!gpio_is_valid(gpio))
+ goto done;
+
+ mutex_lock(&sysfs_lock);
+
+ desc = &gpio_desc[gpio];
+
+ if (test_bit(FLAG_EXPORT, &desc->flags)) {
+ struct device *dev;
+
+ dev = class_find_device(&gpio_class, NULL, desc, match_export);
+ if (dev == NULL) {
+ status = -ENODEV;
+ goto unlock;
+ }
+ }
+
+ status = sysfs_set_active_low(desc, dev, value);
+
+unlock:
+ mutex_unlock(&sysfs_lock);
+
+done:
+ if (status)
+ pr_debug("%s: gpio%d status %d\n", __func__, gpio, status);
+
+ return status;
+}
+EXPORT_SYMBOL_GPL(gpio_sysfs_set_active_low);
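
A minimal in-kernel usage sketch, assuming board code that owns a valid GPIO number; gpio_request(), gpio_direction_input() and gpio_export() are pre-existing gpiolib calls, only gpio_sysfs_set_active_low() is new in this patch:

/* illustration only: export a GPIO and invert its sysfs polarity */
int err;

err = gpio_request(gpio, "example");
if (!err)
	err = gpio_direction_input(gpio);
if (!err)
	err = gpio_export(gpio, false);		/* direction not changeable */
if (!err)
	err = gpio_sysfs_set_active_low(gpio, 1); /* /value now reads inverted */
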
+
/**
* gpio_unexport - reverse effect of gpio_export()
* @gpio: gpio to make unavailable
@@ -1094,6 +1230,7 @@ void gpio_free(unsigned gpio)
}
desc_set_label(desc, NULL);
module_put(desc->chip->owner);
+ clear_bit(FLAG_ACTIVE_LOW, &desc->flags);
clear_bit(FLAG_REQUESTED, &desc->flags);
} else
WARN_ON(extra_checks);
diff --git a/drivers/gpio/langwell_gpio.c b/drivers/gpio/langwell_gpio.c
index 4baf3d7d0f8..6c0ebbdc659 100644
--- a/drivers/gpio/langwell_gpio.c
+++ b/drivers/gpio/langwell_gpio.c
@@ -123,7 +123,7 @@ static int lnw_irq_type(unsigned irq, unsigned type)
void __iomem *grer = (void __iomem *)(&lnw->reg_base->GRER[reg]);
void __iomem *gfer = (void __iomem *)(&lnw->reg_base->GFER[reg]);
- if (gpio < 0 || gpio > lnw->chip.ngpio)
+ if (gpio >= lnw->chip.ngpio)
return -EINVAL;
spin_lock_irqsave(&lnw->lock, flags);
if (type & IRQ_TYPE_EDGE_RISING)
diff --git a/drivers/gpio/timbgpio.c b/drivers/gpio/timbgpio.c
new file mode 100644
index 00000000000..a4d344ba8e5
--- /dev/null
+++ b/drivers/gpio/timbgpio.c
@@ -0,0 +1,342 @@
+/*
+ * timbgpio.c timberdale FPGA GPIO driver
+ * Copyright (c) 2009 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/* Supports:
+ * Timberdale FPGA GPIO
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+#define DRIVER_NAME "timb-gpio"
+
+#define TGPIOVAL 0x00
+#define TGPIODIR 0x04
+#define TGPIO_IER 0x08
+#define TGPIO_ISR 0x0c
+#define TGPIO_IPR 0x10
+#define TGPIO_ICR 0x14
+#define TGPIO_FLR 0x18
+#define TGPIO_LVR 0x1c
+
+struct timbgpio {
+ void __iomem *membase;
+ spinlock_t lock; /* mutual exclusion */
+ struct gpio_chip gpio;
+ int irq_base;
+};
+
+static int timbgpio_update_bit(struct gpio_chip *gpio, unsigned index,
+ unsigned offset, bool enabled)
+{
+ struct timbgpio *tgpio = container_of(gpio, struct timbgpio, gpio);
+ u32 reg;
+
+ spin_lock(&tgpio->lock);
+ reg = ioread32(tgpio->membase + offset);
+
+ if (enabled)
+ reg |= (1 << index);
+ else
+ reg &= ~(1 << index);
+
+ iowrite32(reg, tgpio->membase + offset);
+ spin_unlock(&tgpio->lock);
+
+ return 0;
+}
+
+static int timbgpio_gpio_direction_input(struct gpio_chip *gpio, unsigned nr)
+{
+ return timbgpio_update_bit(gpio, nr, TGPIODIR, true);
+}
+
+static int timbgpio_gpio_get(struct gpio_chip *gpio, unsigned nr)
+{
+ struct timbgpio *tgpio = container_of(gpio, struct timbgpio, gpio);
+ u32 value;
+
+ value = ioread32(tgpio->membase + TGPIOVAL);
+ return (value & (1 << nr)) ? 1 : 0;
+}
+
+static int timbgpio_gpio_direction_output(struct gpio_chip *gpio,
+ unsigned nr, int val)
+{
+ return timbgpio_update_bit(gpio, nr, TGPIODIR, false);
+}
+
+static void timbgpio_gpio_set(struct gpio_chip *gpio,
+ unsigned nr, int val)
+{
+ timbgpio_update_bit(gpio, nr, TGPIOVAL, val != 0);
+}
+
+static int timbgpio_to_irq(struct gpio_chip *gpio, unsigned offset)
+{
+ struct timbgpio *tgpio = container_of(gpio, struct timbgpio, gpio);
+
+ if (tgpio->irq_base <= 0)
+ return -EINVAL;
+
+ return tgpio->irq_base + offset;
+}
+
+/*
+ * GPIO IRQ
+ */
+static void timbgpio_irq_disable(unsigned irq)
+{
+ struct timbgpio *tgpio = get_irq_chip_data(irq);
+ int offset = irq - tgpio->irq_base;
+
+ timbgpio_update_bit(&tgpio->gpio, offset, TGPIO_IER, 0);
+}
+
+static void timbgpio_irq_enable(unsigned irq)
+{
+ struct timbgpio *tgpio = get_irq_chip_data(irq);
+ int offset = irq - tgpio->irq_base;
+
+ timbgpio_update_bit(&tgpio->gpio, offset, TGPIO_IER, 1);
+}
+
+static int timbgpio_irq_type(unsigned irq, unsigned trigger)
+{
+ struct timbgpio *tgpio = get_irq_chip_data(irq);
+ int offset = irq - tgpio->irq_base;
+ unsigned long flags;
+ u32 lvr, flr;
+
+ if (offset < 0 || offset > tgpio->gpio.ngpio)
+ return -EINVAL;
+
+ spin_lock_irqsave(&tgpio->lock, flags);
+
+ lvr = ioread32(tgpio->membase + TGPIO_LVR);
+ flr = ioread32(tgpio->membase + TGPIO_FLR);
+
+ if (trigger & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) {
+ flr &= ~(1 << offset);
+ if (trigger & IRQ_TYPE_LEVEL_HIGH)
+ lvr |= 1 << offset;
+ else
+ lvr &= ~(1 << offset);
+ }
+
+ if ((trigger & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH)
+ return -EINVAL;
+ else {
+ flr |= 1 << offset;
+ /* opposite of what the datasheet says, but it matches the
+ * behaviour observed on real hardware
+ */
+ if (trigger & IRQ_TYPE_EDGE_FALLING)
+ lvr |= 1 << offset;
+ else
+ lvr &= ~(1 << offset);
+ }
+
+ iowrite32(lvr, tgpio->membase + TGPIO_LVR);
+ iowrite32(flr, tgpio->membase + TGPIO_FLR);
+ iowrite32(1 << offset, tgpio->membase + TGPIO_ICR);
+ spin_unlock_irqrestore(&tgpio->lock, flags);
+
+ return 0;
+}
+
+static void timbgpio_irq(unsigned int irq, struct irq_desc *desc)
+{
+ struct timbgpio *tgpio = get_irq_data(irq);
+ unsigned long ipr;
+ int offset;
+
+ desc->chip->ack(irq);
+ ipr = ioread32(tgpio->membase + TGPIO_IPR);
+ iowrite32(ipr, tgpio->membase + TGPIO_ICR);
+
+ for_each_bit(offset, &ipr, tgpio->gpio.ngpio)
+ generic_handle_irq(timbgpio_to_irq(&tgpio->gpio, offset));
+}
+
+static struct irq_chip timbgpio_irqchip = {
+ .name = "GPIO",
+ .enable = timbgpio_irq_enable,
+ .disable = timbgpio_irq_disable,
+ .set_type = timbgpio_irq_type,
+};
+
+static int __devinit timbgpio_probe(struct platform_device *pdev)
+{
+ int err, i;
+ struct gpio_chip *gc;
+ struct timbgpio *tgpio;
+ struct resource *iomem;
+ struct timbgpio_platform_data *pdata = pdev->dev.platform_data;
+ int irq = platform_get_irq(pdev, 0);
+
+ if (!pdata || pdata->nr_pins > 32) {
+ err = -EINVAL;
+ goto err_mem;
+ }
+
+ iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!iomem) {
+ err = -EINVAL;
+ goto err_mem;
+ }
+
+ tgpio = kzalloc(sizeof(*tgpio), GFP_KERNEL);
+ if (!tgpio) {
+ err = -EINVAL;
+ goto err_mem;
+ }
+ tgpio->irq_base = pdata->irq_base;
+
+ spin_lock_init(&tgpio->lock);
+
+ if (!request_mem_region(iomem->start, resource_size(iomem),
+ DRIVER_NAME)) {
+ err = -EBUSY;
+ goto err_request;
+ }
+
+ tgpio->membase = ioremap(iomem->start, resource_size(iomem));
+ if (!tgpio->membase) {
+ err = -ENOMEM;
+ goto err_ioremap;
+ }
+
+ gc = &tgpio->gpio;
+
+ gc->label = dev_name(&pdev->dev);
+ gc->owner = THIS_MODULE;
+ gc->dev = &pdev->dev;
+ gc->direction_input = timbgpio_gpio_direction_input;
+ gc->get = timbgpio_gpio_get;
+ gc->direction_output = timbgpio_gpio_direction_output;
+ gc->set = timbgpio_gpio_set;
+ gc->to_irq = (irq >= 0 && tgpio->irq_base > 0) ? timbgpio_to_irq : NULL;
+ gc->dbg_show = NULL;
+ gc->base = pdata->gpio_base;
+ gc->ngpio = pdata->nr_pins;
+ gc->can_sleep = 0;
+
+ err = gpiochip_add(gc);
+ if (err)
+ goto err_chipadd;
+
+ platform_set_drvdata(pdev, tgpio);
+
+ /* make sure to disable interrupts */
+ iowrite32(0x0, tgpio->membase + TGPIO_IER);
+
+ if (irq < 0 || tgpio->irq_base <= 0)
+ return 0;
+
+ for (i = 0; i < pdata->nr_pins; i++) {
+ set_irq_chip_and_handler_name(tgpio->irq_base + i,
+ &timbgpio_irqchip, handle_simple_irq, "mux");
+ set_irq_chip_data(tgpio->irq_base + i, tgpio);
+#ifdef CONFIG_ARM
+ set_irq_flags(tgpio->irq_base + i, IRQF_VALID | IRQF_PROBE);
+#endif
+ }
+
+ set_irq_data(irq, tgpio);
+ set_irq_chained_handler(irq, timbgpio_irq);
+
+ return 0;
+
+err_chipadd:
+ iounmap(tgpio->membase);
+err_ioremap:
+ release_mem_region(iomem->start, resource_size(iomem));
+err_request:
+ kfree(tgpio);
+err_mem:
+ printk(KERN_ERR DRIVER_NAME": Failed to register GPIOs: %d\n", err);
+
+ return err;
+}
+
+static int __devexit timbgpio_remove(struct platform_device *pdev)
+{
+ int err;
+ struct timbgpio_platform_data *pdata = pdev->dev.platform_data;
+ struct timbgpio *tgpio = platform_get_drvdata(pdev);
+ struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ int irq = platform_get_irq(pdev, 0);
+
+ if (irq >= 0 && tgpio->irq_base > 0) {
+ int i;
+ for (i = 0; i < pdata->nr_pins; i++) {
+ set_irq_chip(tgpio->irq_base + i, NULL);
+ set_irq_chip_data(tgpio->irq_base + i, NULL);
+ }
+
+ set_irq_handler(irq, NULL);
+ set_irq_data(irq, NULL);
+ }
+
+ err = gpiochip_remove(&tgpio->gpio);
+ if (err)
+ printk(KERN_ERR DRIVER_NAME": failed to remove gpio_chip\n");
+
+ iounmap(tgpio->membase);
+ release_mem_region(iomem->start, resource_size(iomem));
+ kfree(tgpio);
+
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static struct platform_driver timbgpio_platform_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = timbgpio_probe,
+ .remove = timbgpio_remove,
+};
+
+/*--------------------------------------------------------------------------*/
+
+static int __init timbgpio_init(void)
+{
+ return platform_driver_register(&timbgpio_platform_driver);
+}
+
+static void __exit timbgpio_exit(void)
+{
+ platform_driver_unregister(&timbgpio_platform_driver);
+}
+
+module_init(timbgpio_init);
+module_exit(timbgpio_exit);
+
+MODULE_DESCRIPTION("Timberdale GPIO driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Mocean Laboratories");
+MODULE_ALIAS("platform:"DRIVER_NAME);
+
diff --git a/drivers/misc/sgi-gru/gru.h b/drivers/misc/sgi-gru/gru.h
index f93f03a9e6e..3ad76cd18b4 100644
--- a/drivers/misc/sgi-gru/gru.h
+++ b/drivers/misc/sgi-gru/gru.h
@@ -53,6 +53,17 @@ struct gru_chiplet_info {
int free_user_cbr;
};
+/*
+ * Statistics kept for each context.
+ */
+struct gru_gseg_statistics {
+ unsigned long fmm_tlbmiss;
+ unsigned long upm_tlbmiss;
+ unsigned long tlbdropin;
+ unsigned long context_stolen;
+ unsigned long reserved[10];
+};
+
/* Flags for GRU options on the gru_create_context() call */
/* Select one of the follow 4 options to specify how TLB misses are handled */
#define GRU_OPT_MISS_DEFAULT 0x0000 /* Use default mode */
diff --git a/drivers/misc/sgi-gru/gru_instructions.h b/drivers/misc/sgi-gru/gru_instructions.h
index 3c9c06618e6..d95587cc794 100644
--- a/drivers/misc/sgi-gru/gru_instructions.h
+++ b/drivers/misc/sgi-gru/gru_instructions.h
@@ -34,17 +34,17 @@ extern void gru_wait_abort_proc(void *cb);
#include
#define __flush_cache(p) ia64_fc((unsigned long)p)
/* Use volatile on IA64 to ensure ordering via st4.rel */
-#define gru_ordered_store_int(p, v) \
+#define gru_ordered_store_ulong(p, v) \
do { \
barrier(); \
- *((volatile int *)(p)) = v; /* force st.rel */ \
+ *((volatile unsigned long *)(p)) = v; /* force st.rel */ \
} while (0)
#elif defined(CONFIG_X86_64)
#define __flush_cache(p) clflush(p)
-#define gru_ordered_store_int(p, v) \
+#define gru_ordered_store_ulong(p, v) \
do { \
barrier(); \
- *(int *)p = v; \
+ *(unsigned long *)p = v; \
} while (0)
#else
#error "Unsupported architecture"
@@ -129,8 +129,13 @@ struct gru_instruction_bits {
*/
struct gru_instruction {
/* DW 0 */
- unsigned int op32; /* icmd,xtype,iaa0,ima,opc */
- unsigned int tri0;
+ union {
+ unsigned long op64; /* icmd,xtype,iaa0,ima,opc,tri0 */
+ struct {
+ unsigned int op32;
+ unsigned int tri0;
+ };
+ };
unsigned long tri1_bufsize; /* DW 1 */
unsigned long baddr0; /* DW 2 */
unsigned long nelem; /* DW 3 */
@@ -140,7 +145,7 @@ struct gru_instruction {
unsigned long avalue; /* DW 7 */
};
-/* Some shifts and masks for the low 32 bits of a GRU command */
+/* Some shifts and masks for the low 64 bits of a GRU command */
#define GRU_CB_ICMD_SHFT 0
#define GRU_CB_ICMD_MASK 0x1
#define GRU_CB_XTYPE_SHFT 8
@@ -155,6 +160,10 @@ struct gru_instruction {
#define GRU_CB_OPC_MASK 0xff
#define GRU_CB_EXOPC_SHFT 24
#define GRU_CB_EXOPC_MASK 0xff
+#define GRU_IDEF2_SHFT 32
+#define GRU_IDEF2_MASK 0x3ffff
+#define GRU_ISTATUS_SHFT 56
+#define GRU_ISTATUS_MASK 0x3
/* GRU instruction opcodes (opc field) */
#define OP_NOP 0x00
@@ -256,6 +265,7 @@ struct gru_instruction {
#define CBE_CAUSE_PROTOCOL_STATE_DATA_ERROR (1 << 16)
#define CBE_CAUSE_RA_RESPONSE_DATA_ERROR (1 << 17)
#define CBE_CAUSE_HA_RESPONSE_DATA_ERROR (1 << 18)
+#define CBE_CAUSE_FORCED_ERROR (1 << 19)
/* CBE cbrexecstatus bits */
#define CBR_EXS_ABORT_OCC_BIT 0
@@ -264,13 +274,15 @@ struct gru_instruction {
#define CBR_EXS_QUEUED_BIT 3
#define CBR_EXS_TLB_INVAL_BIT 4
#define CBR_EXS_EXCEPTION_BIT 5
+#define CBR_EXS_CB_INT_PENDING_BIT 6
#define CBR_EXS_ABORT_OCC (1 << CBR_EXS_ABORT_OCC_BIT)
#define CBR_EXS_INT_OCC (1 << CBR_EXS_INT_OCC_BIT)
#define CBR_EXS_PENDING (1 << CBR_EXS_PENDING_BIT)
#define CBR_EXS_QUEUED (1 << CBR_EXS_QUEUED_BIT)
-#define CBR_TLB_INVAL (1 << CBR_EXS_TLB_INVAL_BIT)
+#define CBR_EXS_TLB_INVAL (1 << CBR_EXS_TLB_INVAL_BIT)
#define CBR_EXS_EXCEPTION (1 << CBR_EXS_EXCEPTION_BIT)
+#define CBR_EXS_CB_INT_PENDING (1 << CBR_EXS_CB_INT_PENDING_BIT)
/*
* Exceptions are retried for the following cases. If any OTHER bits are set
@@ -296,12 +308,14 @@ union gru_mesqhead {
/* Generate the low word of a GRU instruction */
-static inline unsigned int
-__opword(unsigned char opcode, unsigned char exopc, unsigned char xtype,
+static inline unsigned long
+__opdword(unsigned char opcode, unsigned char exopc, unsigned char xtype,
unsigned char iaa0, unsigned char iaa1,
- unsigned char ima)
+ unsigned long idef2, unsigned char ima)
{
return (1 << GRU_CB_ICMD_SHFT) |
+ ((unsigned long)CBS_ACTIVE << GRU_ISTATUS_SHFT) |
+ (idef2 << GRU_IDEF2_SHFT) |
(iaa0 << GRU_CB_IAA0_SHFT) |
(iaa1 << GRU_CB_IAA1_SHFT) |
(ima << GRU_CB_IMA_SHFT) |
@@ -319,12 +333,13 @@ static inline void gru_flush_cache(void *p)
}
/*
- * Store the lower 32 bits of the command including the "start" bit. Then
+ * Store the lower 64 bits of the command including the "start" bit. Then
* start the instruction executing.
*/
-static inline void gru_start_instruction(struct gru_instruction *ins, int op32)
+static inline void gru_start_instruction(struct gru_instruction *ins, unsigned long op64)
{
- gru_ordered_store_int(ins, op32);
+ gru_ordered_store_ulong(ins, op64);
+ mb();
gru_flush_cache(ins);
}
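
Roughly, the 64-bit word built by __opdword() and written here is laid out as follows (editorial sketch based on the shift/mask constants above, not authoritative):

/* illustration only: layout of the op64 word stored into DW 0 of the CB
 *   bits  0..31  icmd, xtype, iaa0/1, ima, opc, exopc (the old op32)
 *   bits 32..49  idef2 -- now carries tri0 (GRU_IDEF2_SHFT/GRU_IDEF2_MASK)
 *   bits 56..57  istatus, preset to CBS_ACTIVE (GRU_ISTATUS_SHFT/MASK)
 * tri0 and hints below stand for caller-supplied values.
 */
unsigned long op64 = __opdword(OP_VLOAD, 0, XTYPE_DW, IAA_RAM, 0,
			       tri0, CB_IMA(hints));
gru_ordered_store_ulong(ins, op64);	/* one ordered 64-bit store starts the CB */
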
@@ -340,6 +355,30 @@ static inline void gru_start_instruction(struct gru_instruction *ins, int op32)
* - nelem and stride are in elements
* - tri0/tri1 is in bytes for the beginning of the data segment.
*/
+static inline void gru_vload_phys(void *cb, unsigned long gpa,
+ unsigned int tri0, int iaa, unsigned long hints)
+{
+ struct gru_instruction *ins = (struct gru_instruction *)cb;
+
+ ins->baddr0 = (long)gpa | ((unsigned long)iaa << 62);
+ ins->nelem = 1;
+ ins->op1_stride = 1;
+ gru_start_instruction(ins, __opdword(OP_VLOAD, 0, XTYPE_DW, iaa, 0,
+ (unsigned long)tri0, CB_IMA(hints)));
+}
+
+static inline void gru_vstore_phys(void *cb, unsigned long gpa,
+ unsigned int tri0, int iaa, unsigned long hints)
+{
+ struct gru_instruction *ins = (struct gru_instruction *)cb;
+
+ ins->baddr0 = (long)gpa | ((unsigned long)iaa << 62);
+ ins->nelem = 1;
+ ins->op1_stride = 1;
+ gru_start_instruction(ins, __opdword(OP_VSTORE, 0, XTYPE_DW, iaa, 0,
+ (unsigned long)tri0, CB_IMA(hints)));
+}
+
static inline void gru_vload(void *cb, unsigned long mem_addr,
unsigned int tri0, unsigned char xtype, unsigned long nelem,
unsigned long stride, unsigned long hints)
@@ -348,10 +387,9 @@ static inline void gru_vload(void *cb, unsigned long mem_addr,
ins->baddr0 = (long)mem_addr;
ins->nelem = nelem;
- ins->tri0 = tri0;
ins->op1_stride = stride;
- gru_start_instruction(ins, __opword(OP_VLOAD, 0, xtype, IAA_RAM, 0,
- CB_IMA(hints)));
+ gru_start_instruction(ins, __opdword(OP_VLOAD, 0, xtype, IAA_RAM, 0,
+ (unsigned long)tri0, CB_IMA(hints)));
}
static inline void gru_vstore(void *cb, unsigned long mem_addr,
@@ -362,10 +400,9 @@ static inline void gru_vstore(void *cb, unsigned long mem_addr,
ins->baddr0 = (long)mem_addr;
ins->nelem = nelem;
- ins->tri0 = tri0;
ins->op1_stride = stride;
- gru_start_instruction(ins, __opword(OP_VSTORE, 0, xtype, IAA_RAM, 0,
- CB_IMA(hints)));
+ gru_start_instruction(ins, __opdword(OP_VSTORE, 0, xtype, IAA_RAM, 0,
+ tri0, CB_IMA(hints)));
}
static inline void gru_ivload(void *cb, unsigned long mem_addr,
@@ -376,10 +413,9 @@ static inline void gru_ivload(void *cb, unsigned long mem_addr,
ins->baddr0 = (long)mem_addr;
ins->nelem = nelem;
- ins->tri0 = tri0;
ins->tri1_bufsize = tri1;
- gru_start_instruction(ins, __opword(OP_IVLOAD, 0, xtype, IAA_RAM, 0,
- CB_IMA(hints)));
+ gru_start_instruction(ins, __opdword(OP_IVLOAD, 0, xtype, IAA_RAM, 0,
+ tri0, CB_IMA(hints)));
}
static inline void gru_ivstore(void *cb, unsigned long mem_addr,
@@ -390,10 +426,9 @@ static inline void gru_ivstore(void *cb, unsigned long mem_addr,
ins->baddr0 = (long)mem_addr;
ins->nelem = nelem;
- ins->tri0 = tri0;
ins->tri1_bufsize = tri1;
- gru_start_instruction(ins, __opword(OP_IVSTORE, 0, xtype, IAA_RAM, 0,
- CB_IMA(hints)));
+ gru_start_instruction(ins, __opdword(OP_IVSTORE, 0, xtype, IAA_RAM, 0,
+ tri0, CB_IMA(hints)));
}
static inline void gru_vset(void *cb, unsigned long mem_addr,
@@ -406,8 +441,8 @@ static inline void gru_vset(void *cb, unsigned long mem_addr,
ins->op2_value_baddr1 = value;
ins->nelem = nelem;
ins->op1_stride = stride;
- gru_start_instruction(ins, __opword(OP_VSET, 0, xtype, IAA_RAM, 0,
- CB_IMA(hints)));
+ gru_start_instruction(ins, __opdword(OP_VSET, 0, xtype, IAA_RAM, 0,
+ 0, CB_IMA(hints)));
}
static inline void gru_ivset(void *cb, unsigned long mem_addr,
@@ -420,8 +455,8 @@ static inline void gru_ivset(void *cb, unsigned long mem_addr,
ins->op2_value_baddr1 = value;
ins->nelem = nelem;
ins->tri1_bufsize = tri1;
- gru_start_instruction(ins, __opword(OP_IVSET, 0, xtype, IAA_RAM, 0,
- CB_IMA(hints)));
+ gru_start_instruction(ins, __opdword(OP_IVSET, 0, xtype, IAA_RAM, 0,
+ 0, CB_IMA(hints)));
}
static inline void gru_vflush(void *cb, unsigned long mem_addr,
@@ -433,15 +468,15 @@ static inline void gru_vflush(void *cb, unsigned long mem_addr,
ins->baddr0 = (long)mem_addr;
ins->op1_stride = stride;
ins->nelem = nelem;
- gru_start_instruction(ins, __opword(OP_VFLUSH, 0, xtype, IAA_RAM, 0,
- CB_IMA(hints)));
+ gru_start_instruction(ins, __opdword(OP_VFLUSH, 0, xtype, IAA_RAM, 0,
+ 0, CB_IMA(hints)));
}
static inline void gru_nop(void *cb, int hints)
{
struct gru_instruction *ins = (void *)cb;
- gru_start_instruction(ins, __opword(OP_NOP, 0, 0, 0, 0, CB_IMA(hints)));
+ gru_start_instruction(ins, __opdword(OP_NOP, 0, 0, 0, 0, 0, CB_IMA(hints)));
}
@@ -455,10 +490,9 @@ static inline void gru_bcopy(void *cb, const unsigned long src,
ins->baddr0 = (long)src;
ins->op2_value_baddr1 = (long)dest;
ins->nelem = nelem;
- ins->tri0 = tri0;
ins->tri1_bufsize = bufsize;
- gru_start_instruction(ins, __opword(OP_BCOPY, 0, xtype, IAA_RAM,
- IAA_RAM, CB_IMA(hints)));
+ gru_start_instruction(ins, __opdword(OP_BCOPY, 0, xtype, IAA_RAM,
+ IAA_RAM, tri0, CB_IMA(hints)));
}
static inline void gru_bstore(void *cb, const unsigned long src,
@@ -470,9 +504,8 @@ static inline void gru_bstore(void *cb, const unsigned long src,
ins->baddr0 = (long)src;
ins->op2_value_baddr1 = (long)dest;
ins->nelem = nelem;
- ins->tri0 = tri0;
- gru_start_instruction(ins, __opword(OP_BSTORE, 0, xtype, 0, IAA_RAM,
- CB_IMA(hints)));
+ gru_start_instruction(ins, __opdword(OP_BSTORE, 0, xtype, 0, IAA_RAM,
+ tri0, CB_IMA(hints)));
}
static inline void gru_gamir(void *cb, int exopc, unsigned long src,
@@ -481,8 +514,8 @@ static inline void gru_gamir(void *cb, int exopc, unsigned long src,
struct gru_instruction *ins = (void *)cb;
ins->baddr0 = (long)src;
- gru_start_instruction(ins, __opword(OP_GAMIR, exopc, xtype, IAA_RAM, 0,
- CB_IMA(hints)));
+ gru_start_instruction(ins, __opdword(OP_GAMIR, exopc, xtype, IAA_RAM, 0,
+ 0, CB_IMA(hints)));
}
static inline void gru_gamirr(void *cb, int exopc, unsigned long src,
@@ -491,8 +524,8 @@ static inline void gru_gamirr(void *cb, int exopc, unsigned long src,
struct gru_instruction *ins = (void *)cb;
ins->baddr0 = (long)src;
- gru_start_instruction(ins, __opword(OP_GAMIRR, exopc, xtype, IAA_RAM, 0,
- CB_IMA(hints)));
+ gru_start_instruction(ins, __opdword(OP_GAMIRR, exopc, xtype, IAA_RAM, 0,
+ 0, CB_IMA(hints)));
}
static inline void gru_gamer(void *cb, int exopc, unsigned long src,
@@ -505,8 +538,8 @@ static inline void gru_gamer(void *cb, int exopc, unsigned long src,
ins->baddr0 = (long)src;
ins->op1_stride = operand1;
ins->op2_value_baddr1 = operand2;
- gru_start_instruction(ins, __opword(OP_GAMER, exopc, xtype, IAA_RAM, 0,
- CB_IMA(hints)));
+ gru_start_instruction(ins, __opdword(OP_GAMER, exopc, xtype, IAA_RAM, 0,
+ 0, CB_IMA(hints)));
}
static inline void gru_gamerr(void *cb, int exopc, unsigned long src,
@@ -518,8 +551,8 @@ static inline void gru_gamerr(void *cb, int exopc, unsigned long src,
ins->baddr0 = (long)src;
ins->op1_stride = operand1;
ins->op2_value_baddr1 = operand2;
- gru_start_instruction(ins, __opword(OP_GAMERR, exopc, xtype, IAA_RAM, 0,
- CB_IMA(hints)));
+ gru_start_instruction(ins, __opdword(OP_GAMERR, exopc, xtype, IAA_RAM, 0,
+ 0, CB_IMA(hints)));
}
static inline void gru_gamxr(void *cb, unsigned long src,
@@ -529,8 +562,8 @@ static inline void gru_gamxr(void *cb, unsigned long src,
ins->baddr0 = (long)src;
ins->nelem = 4;
- gru_start_instruction(ins, __opword(OP_GAMXR, EOP_XR_CSWAP, XTYPE_DW,
- IAA_RAM, 0, CB_IMA(hints)));
+ gru_start_instruction(ins, __opdword(OP_GAMXR, EOP_XR_CSWAP, XTYPE_DW,
+ IAA_RAM, 0, 0, CB_IMA(hints)));
}
static inline void gru_mesq(void *cb, unsigned long queue,
@@ -541,9 +574,8 @@ static inline void gru_mesq(void *cb, unsigned long queue,
ins->baddr0 = (long)queue;
ins->nelem = nelem;
- ins->tri0 = tri0;
- gru_start_instruction(ins, __opword(OP_MESQ, 0, XTYPE_CL, IAA_RAM, 0,
- CB_IMA(hints)));
+ gru_start_instruction(ins, __opdword(OP_MESQ, 0, XTYPE_CL, IAA_RAM, 0,
+ tri0, CB_IMA(hints)));
}
static inline unsigned long gru_get_amo_value(void *cb)
@@ -662,6 +694,14 @@ static inline void gru_wait_abort(void *cb)
gru_wait_abort_proc(cb);
}
+/*
+ * Get a pointer to the start of a gseg
+ * p - Any valid pointer within the gseg
+ */
+static inline void *gru_get_gseg_pointer(void *p)
+{
+ return (void *)((unsigned long)p & ~(GRU_GSEG_PAGESIZE - 1));
+}
/*
* Get a pointer to a control block
diff --git a/drivers/misc/sgi-gru/grufault.c b/drivers/misc/sgi-gru/grufault.c
index 679e0177828..38657cdaf54 100644
--- a/drivers/misc/sgi-gru/grufault.c
+++ b/drivers/misc/sgi-gru/grufault.c
@@ -40,6 +40,12 @@
#include "gru_instructions.h"
#include
+/* Return codes for vtop functions */
+#define VTOP_SUCCESS 0
+#define VTOP_INVALID -1
+#define VTOP_RETRY -2
+
+
/*
* Test if a physical address is a valid GRU GSEG address
*/
@@ -90,19 +96,22 @@ static struct gru_thread_state *gru_alloc_locked_gts(unsigned long vaddr)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
- struct gru_thread_state *gts = NULL;
+ struct gru_thread_state *gts = ERR_PTR(-EINVAL);
down_write(&mm->mmap_sem);
vma = gru_find_vma(vaddr);
- if (vma)
- gts = gru_alloc_thread_state(vma, TSID(vaddr, vma));
- if (gts) {
- mutex_lock(>s->ts_ctxlock);
- downgrade_write(&mm->mmap_sem);
- } else {
- up_write(&mm->mmap_sem);
- }
+ if (!vma)
+ goto err;
+ gts = gru_alloc_thread_state(vma, TSID(vaddr, vma));
+ if (IS_ERR(gts))
+ goto err;
+ mutex_lock(>s->ts_ctxlock);
+ downgrade_write(&mm->mmap_sem);
+ return gts;
+
+err:
+ up_write(&mm->mmap_sem);
return gts;
}
@@ -122,38 +131,14 @@ static void gru_unlock_gts(struct gru_thread_state *gts)
* is necessary to prevent the user from seeing a stale cb.istatus that will
* change as soon as the TFH restart is complete. Races may cause an
* occasional failure to clear the cb.istatus, but that is ok.
- *
- * If the cb address is not valid (should not happen, but...), nothing
- * bad will happen.. The get_user()/put_user() will fail but there
- * are no bad side-effects.
*/
-static void gru_cb_set_istatus_active(unsigned long __user *cb)
+static void gru_cb_set_istatus_active(struct gru_instruction_bits *cbk)
{
- union {
- struct gru_instruction_bits bits;
- unsigned long dw;
- } u;
-
- if (cb) {
- get_user(u.dw, cb);
- u.bits.istatus = CBS_ACTIVE;
- put_user(u.dw, cb);
+ if (cbk) {
+ cbk->istatus = CBS_ACTIVE;
}
}
-/*
- * Convert a interrupt IRQ to a pointer to the GRU GTS that caused the
- * interrupt. Interrupts are always sent to a cpu on the blade that contains the
- * GRU (except for headless blades which are not currently supported). A blade
- * has N grus; a block of N consecutive IRQs is assigned to the GRUs. The IRQ
- * number uniquely identifies the GRU chiplet on the local blade that caused the
- * interrupt. Always called in interrupt context.
- */
-static inline struct gru_state *irq_to_gru(int irq)
-{
- return &gru_base[uv_numa_blade_id()]->bs_grus[irq - IRQ_GRU];
-}
-
/*
* Read & clear a TFM
*
@@ -207,10 +192,11 @@ static int non_atomic_pte_lookup(struct vm_area_struct *vma,
{
struct page *page;
- /* ZZZ Need to handle HUGE pages */
- if (is_vm_hugetlb_page(vma))
- return -EFAULT;
+#ifdef CONFIG_HUGETLB_PAGE
+ *pageshift = is_vm_hugetlb_page(vma) ? HPAGE_SHIFT : PAGE_SHIFT;
+#else
*pageshift = PAGE_SHIFT;
+#endif
if (get_user_pages
(current, current->mm, vaddr, 1, write, 0, &page, NULL) <= 0)
return -EFAULT;
@@ -268,7 +254,6 @@ static int atomic_pte_lookup(struct vm_area_struct *vma, unsigned long vaddr,
return 0;
err:
- local_irq_enable();
return 1;
}
@@ -301,15 +286,70 @@ static int gru_vtop(struct gru_thread_state *gts, unsigned long vaddr,
paddr = paddr & ~((1UL << ps) - 1);
*gpa = uv_soc_phys_ram_to_gpa(paddr);
*pageshift = ps;
- return 0;
+ return VTOP_SUCCESS;
inval:
- return -1;
+ return VTOP_INVALID;
upm:
- return -2;
+ return VTOP_RETRY;
}
+/*
+ * Flush a CBE from cache. The CBE is clean in the cache. Dirty the
+ * CBE cacheline so that the line will be written back to the home agent.
+ * Otherwise the line may be silently dropped. This has no impact
+ * except on performance.
+ */
+static void gru_flush_cache_cbe(struct gru_control_block_extended *cbe)
+{
+ if (unlikely(cbe)) {
+ cbe->cbrexecstatus = 0; /* make CL dirty */
+ gru_flush_cache(cbe);
+ }
+}
+
+/*
+ * Preload the TLB with entries that may be required. Currently, preloading
+ * is implemented only for BCOPY. Preload "tlb_preload_count" pages OR to
+ * the end of the bcopy transfer, whichever is smaller.
+ */
+static void gru_preload_tlb(struct gru_state *gru,
+ struct gru_thread_state *gts, int atomic,
+ unsigned long fault_vaddr, int asid, int write,
+ unsigned char tlb_preload_count,
+ struct gru_tlb_fault_handle *tfh,
+ struct gru_control_block_extended *cbe)
+{
+ unsigned long vaddr = 0, gpa;
+ int ret, pageshift;
+
+ if (cbe->opccpy != OP_BCOPY)
+ return;
+
+ if (fault_vaddr == cbe->cbe_baddr0)
+ vaddr = fault_vaddr + GRU_CACHE_LINE_BYTES * cbe->cbe_src_cl - 1;
+ else if (fault_vaddr == cbe->cbe_baddr1)
+ vaddr = fault_vaddr + (1 << cbe->xtypecpy) * cbe->cbe_nelemcur - 1;
+
+ fault_vaddr &= PAGE_MASK;
+ vaddr &= PAGE_MASK;
+ vaddr = min(vaddr, fault_vaddr + tlb_preload_count * PAGE_SIZE);
+
+ while (vaddr > fault_vaddr) {
+ ret = gru_vtop(gts, vaddr, write, atomic, &gpa, &pageshift);
+ if (ret || tfh_write_only(tfh, gpa, GAA_RAM, vaddr, asid, write,
+ GRU_PAGESIZE(pageshift)))
+ return;
+ gru_dbg(grudev,
+ "%s: gid %d, gts 0x%p, tfh 0x%p, vaddr 0x%lx, asid 0x%x, rw %d, ps %d, gpa 0x%lx\n",
+ atomic ? "atomic" : "non-atomic", gru->gs_gid, gts, tfh,
+ vaddr, asid, write, pageshift, gpa);
+ vaddr -= PAGE_SIZE;
+ STAT(tlb_preload_page);
+ }
+}
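
As a worked example of the preload window computed above (illustration only): for a fault at the base of a long BCOPY source with tlb_preload_count == 2, the loop drops in the two pages just above the faulting page and then stops:

/* illustration only */
unsigned long fault = cbe->cbe_baddr0 & PAGE_MASK;	/* faulting page */
/* gru_preload_tlb() then preloads, in this order:
 *	fault + 2 * PAGE_SIZE
 *	fault + 1 * PAGE_SIZE
 * the faulting page itself is handled by the normal dropin path below.
 */
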
+
/*
* Drop a TLB entry into the GRU. The fault is described by info in an TFH.
* Input:
@@ -320,11 +360,14 @@ upm:
* < 0 = error code
*
*/
-static int gru_try_dropin(struct gru_thread_state *gts,
+static int gru_try_dropin(struct gru_state *gru,
+ struct gru_thread_state *gts,
struct gru_tlb_fault_handle *tfh,
- unsigned long __user *cb)
+ struct gru_instruction_bits *cbk)
{
- int pageshift = 0, asid, write, ret, atomic = !cb;
+ struct gru_control_block_extended *cbe = NULL;
+ unsigned char tlb_preload_count = gts->ts_tlb_preload_count;
+ int pageshift = 0, asid, write, ret, atomic = !cbk, indexway;
unsigned long gpa = 0, vaddr = 0;
/*
@@ -334,6 +377,14 @@ static int gru_try_dropin(struct gru_thread_state *gts,
* the dropin is ignored. This eliminates the need for additional locks.
*/
+ /*
+ * Prefetch the CBE if doing TLB preloading
+ */
+ if (unlikely(tlb_preload_count)) {
+ cbe = gru_tfh_to_cbe(tfh);
+ prefetchw(cbe);
+ }
+
/*
* Error if TFH state is IDLE or FMM mode & the user issuing a UPM call.
* Might be a hardware race OR a stupid user. Ignore FMM because FMM
@@ -341,18 +392,20 @@ static int gru_try_dropin(struct gru_thread_state *gts,
*/
if (tfh->status != TFHSTATUS_EXCEPTION) {
gru_flush_cache(tfh);
+ sync_core();
if (tfh->status != TFHSTATUS_EXCEPTION)
goto failnoexception;
STAT(tfh_stale_on_fault);
}
if (tfh->state == TFHSTATE_IDLE)
goto failidle;
- if (tfh->state == TFHSTATE_MISS_FMM && cb)
+ if (tfh->state == TFHSTATE_MISS_FMM && cbk)
goto failfmm;
write = (tfh->cause & TFHCAUSE_TLB_MOD) != 0;
vaddr = tfh->missvaddr;
asid = tfh->missasid;
+ indexway = tfh->indexway;
if (asid == 0)
goto failnoasid;
@@ -366,41 +419,51 @@ static int gru_try_dropin(struct gru_thread_state *gts,
goto failactive;
ret = gru_vtop(gts, vaddr, write, atomic, &gpa, &pageshift);
- if (ret == -1)
+ if (ret == VTOP_INVALID)
goto failinval;
- if (ret == -2)
+ if (ret == VTOP_RETRY)
goto failupm;
if (!(gts->ts_sizeavail & GRU_SIZEAVAIL(pageshift))) {
gts->ts_sizeavail |= GRU_SIZEAVAIL(pageshift);
- if (atomic || !gru_update_cch(gts, 0)) {
+ if (atomic || !gru_update_cch(gts)) {
gts->ts_force_cch_reload = 1;
goto failupm;
}
}
- gru_cb_set_istatus_active(cb);
+
+ if (unlikely(cbe) && pageshift == PAGE_SHIFT) {
+ gru_preload_tlb(gru, gts, atomic, vaddr, asid, write, tlb_preload_count, tfh, cbe);
+ gru_flush_cache_cbe(cbe);
+ }
+
+ gru_cb_set_istatus_active(cbk);
+ gts->ustats.tlbdropin++;
tfh_write_restart(tfh, gpa, GAA_RAM, vaddr, asid, write,
GRU_PAGESIZE(pageshift));
- STAT(tlb_dropin);
gru_dbg(grudev,
- "%s: tfh 0x%p, vaddr 0x%lx, asid 0x%x, ps %d, gpa 0x%lx\n",
- ret ? "non-atomic" : "atomic", tfh, vaddr, asid,
- pageshift, gpa);
+ "%s: gid %d, gts 0x%p, tfh 0x%p, vaddr 0x%lx, asid 0x%x, indexway 0x%x,"
+ " rw %d, ps %d, gpa 0x%lx\n",
+ atomic ? "atomic" : "non-atomic", gru->gs_gid, gts, tfh, vaddr, asid,
+ indexway, write, pageshift, gpa);
+ STAT(tlb_dropin);
return 0;
failnoasid:
/* No asid (delayed unload). */
STAT(tlb_dropin_fail_no_asid);
gru_dbg(grudev, "FAILED no_asid tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
- if (!cb)
+ if (!cbk)
tfh_user_polling_mode(tfh);
else
gru_flush_cache(tfh);
+ gru_flush_cache_cbe(cbe);
return -EAGAIN;
failupm:
/* Atomic failure switch CBR to UPM */
tfh_user_polling_mode(tfh);
+ gru_flush_cache_cbe(cbe);
STAT(tlb_dropin_fail_upm);
gru_dbg(grudev, "FAILED upm tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
return 1;
@@ -408,6 +471,7 @@ failupm:
failfmm:
/* FMM state on UPM call */
gru_flush_cache(tfh);
+ gru_flush_cache_cbe(cbe);
STAT(tlb_dropin_fail_fmm);
gru_dbg(grudev, "FAILED fmm tfh: 0x%p, state %d\n", tfh, tfh->state);
return 0;
@@ -415,17 +479,20 @@ failfmm:
failnoexception:
/* TFH status did not show exception pending */
gru_flush_cache(tfh);
- if (cb)
- gru_flush_cache(cb);
+ gru_flush_cache_cbe(cbe);
+ if (cbk)
+ gru_flush_cache(cbk);
STAT(tlb_dropin_fail_no_exception);
- gru_dbg(grudev, "FAILED non-exception tfh: 0x%p, status %d, state %d\n", tfh, tfh->status, tfh->state);
+ gru_dbg(grudev, "FAILED non-exception tfh: 0x%p, status %d, state %d\n",
+ tfh, tfh->status, tfh->state);
return 0;
failidle:
/* TFH state was idle - no miss pending */
gru_flush_cache(tfh);
- if (cb)
- gru_flush_cache(cb);
+ gru_flush_cache_cbe(cbe);
+ if (cbk)
+ gru_flush_cache(cbk);
STAT(tlb_dropin_fail_idle);
gru_dbg(grudev, "FAILED idle tfh: 0x%p, state %d\n", tfh, tfh->state);
return 0;
@@ -433,16 +500,18 @@ failidle:
failinval:
/* All errors (atomic & non-atomic) switch CBR to EXCEPTION state */
tfh_exception(tfh);
+ gru_flush_cache_cbe(cbe);
STAT(tlb_dropin_fail_invalid);
gru_dbg(grudev, "FAILED inval tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
return -EFAULT;
failactive:
/* Range invalidate active. Switch to UPM iff atomic */
- if (!cb)
+ if (!cbk)
tfh_user_polling_mode(tfh);
else
gru_flush_cache(tfh);
+ gru_flush_cache_cbe(cbe);
STAT(tlb_dropin_fail_range_active);
gru_dbg(grudev, "FAILED range active: tfh 0x%p, vaddr 0x%lx\n",
tfh, vaddr);
@@ -455,31 +524,41 @@ failactive:
* Note that this is the interrupt handler that is registered with linux
* interrupt handlers.
*/
-irqreturn_t gru_intr(int irq, void *dev_id)
+static irqreturn_t gru_intr(int chiplet, int blade)
{
struct gru_state *gru;
struct gru_tlb_fault_map imap, dmap;
struct gru_thread_state *gts;
struct gru_tlb_fault_handle *tfh = NULL;
+ struct completion *cmp;
int cbrnum, ctxnum;
STAT(intr);
- gru = irq_to_gru(irq);
+ gru = &gru_base[blade]->bs_grus[chiplet];
if (!gru) {
- dev_err(grudev, "GRU: invalid interrupt: cpu %d, irq %d\n",
- raw_smp_processor_id(), irq);
+ dev_err(grudev, "GRU: invalid interrupt: cpu %d, chiplet %d\n",
+ raw_smp_processor_id(), chiplet);
return IRQ_NONE;
}
get_clear_fault_map(gru, &imap, &dmap);
+ gru_dbg(grudev,
+ "cpu %d, chiplet %d, gid %d, imap %016lx %016lx, dmap %016lx %016lx\n",
+ smp_processor_id(), chiplet, gru->gs_gid,
+ imap.fault_bits[0], imap.fault_bits[1],
+ dmap.fault_bits[0], dmap.fault_bits[1]);
for_each_cbr_in_tfm(cbrnum, dmap.fault_bits) {
- complete(gru->gs_blade->bs_async_wq);
+ STAT(intr_cbr);
+ cmp = gru->gs_blade->bs_async_wq;
+ if (cmp)
+ complete(cmp);
gru_dbg(grudev, "gid %d, cbr_done %d, done %d\n",
- gru->gs_gid, cbrnum, gru->gs_blade->bs_async_wq->done);
+ gru->gs_gid, cbrnum, cmp ? cmp->done : -1);
}
for_each_cbr_in_tfm(cbrnum, imap.fault_bits) {
+ STAT(intr_tfh);
tfh = get_tfh_by_index(gru, cbrnum);
prefetchw(tfh); /* Helps on hdw, required for emulator */
@@ -492,14 +571,20 @@ irqreturn_t gru_intr(int irq, void *dev_id)
ctxnum = tfh->ctxnum;
gts = gru->gs_gts[ctxnum];
+ /* Spurious interrupts can cause this. Ignore. */
+ if (!gts) {
+ STAT(intr_spurious);
+ continue;
+ }
+
/*
* This is running in interrupt context. Trylock the mmap_sem.
* If it fails, retry the fault in user context.
*/
+ gts->ustats.fmm_tlbmiss++;
if (!gts->ts_force_cch_reload &&
down_read_trylock(>s->ts_mm->mmap_sem)) {
- gts->ustats.fmm_tlbdropin++;
- gru_try_dropin(gts, tfh, NULL);
+ gru_try_dropin(gru, gts, tfh, NULL);
up_read(>s->ts_mm->mmap_sem);
} else {
tfh_user_polling_mode(tfh);
@@ -509,20 +594,43 @@ irqreturn_t gru_intr(int irq, void *dev_id)
return IRQ_HANDLED;
}
+irqreturn_t gru0_intr(int irq, void *dev_id)
+{
+ return gru_intr(0, uv_numa_blade_id());
+}
+
+irqreturn_t gru1_intr(int irq, void *dev_id)
+{
+ return gru_intr(1, uv_numa_blade_id());
+}
+
+irqreturn_t gru_intr_mblade(int irq, void *dev_id)
+{
+ int blade;
+
+ for_each_possible_blade(blade) {
+ if (uv_blade_nr_possible_cpus(blade))
+ continue;
+ gru_intr(0, blade);
+ gru_intr(1, blade);
+ }
+ return IRQ_HANDLED;
+}
+
static int gru_user_dropin(struct gru_thread_state *gts,
struct gru_tlb_fault_handle *tfh,
- unsigned long __user *cb)
+ void *cb)
{
struct gru_mm_struct *gms = gts->ts_gms;
int ret;
- gts->ustats.upm_tlbdropin++;
+ gts->ustats.upm_tlbmiss++;
while (1) {
wait_event(gms->ms_wait_queue,
atomic_read(&gms->ms_range_active) == 0);
prefetchw(tfh); /* Helps on hdw, required for emulator */
- ret = gru_try_dropin(gts, tfh, cb);
+ ret = gru_try_dropin(gts->ts_gru, gts, tfh, cb);
if (ret <= 0)
return ret;
STAT(call_os_wait_queue);
@@ -538,52 +646,41 @@ int gru_handle_user_call_os(unsigned long cb)
{
struct gru_tlb_fault_handle *tfh;
struct gru_thread_state *gts;
- unsigned long __user *cbp;
+ void *cbk;
int ucbnum, cbrnum, ret = -EINVAL;
STAT(call_os);
- gru_dbg(grudev, "address 0x%lx\n", cb);
/* sanity check the cb pointer */
ucbnum = get_cb_number((void *)cb);
if ((cb & (GRU_HANDLE_STRIDE - 1)) || ucbnum >= GRU_NUM_CB)
return -EINVAL;
- cbp = (unsigned long *)cb;
gts = gru_find_lock_gts(cb);
if (!gts)
return -EINVAL;
+ gru_dbg(grudev, "address 0x%lx, gid %d, gts 0x%p\n", cb, gts->ts_gru ? gts->ts_gru->gs_gid : -1, gts);
if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE)
goto exit;
- /*
- * If force_unload is set, the UPM TLB fault is phony. The task
- * has migrated to another node and the GSEG must be moved. Just
- * unload the context. The task will page fault and assign a new
- * context.
- */
- if (gts->ts_tgid_owner == current->tgid && gts->ts_blade >= 0 &&
- gts->ts_blade != uv_numa_blade_id()) {
- STAT(call_os_offnode_reference);
- gts->ts_force_unload = 1;
- }
+ gru_check_context_placement(gts);
/*
* CCH may contain stale data if ts_force_cch_reload is set.
*/
if (gts->ts_gru && gts->ts_force_cch_reload) {
gts->ts_force_cch_reload = 0;
- gru_update_cch(gts, 0);
+ gru_update_cch(gts);
}
ret = -EAGAIN;
cbrnum = thread_cbr_number(gts, ucbnum);
- if (gts->ts_force_unload) {
- gru_unload_context(gts, 1);
- } else if (gts->ts_gru) {
+ if (gts->ts_gru) {
tfh = get_tfh_by_index(gts->ts_gru, cbrnum);
- ret = gru_user_dropin(gts, tfh, cbp);
+ cbk = get_gseg_base_address_cb(gts->ts_gru->gs_gru_base_vaddr,
+ gts->ts_ctxnum, ucbnum);
+ ret = gru_user_dropin(gts, tfh, cbk);
}
exit:
gru_unlock_gts(gts);
@@ -605,11 +702,11 @@ int gru_get_exception_detail(unsigned long arg)
if (copy_from_user(&excdet, (void __user *)arg, sizeof(excdet)))
return -EFAULT;
- gru_dbg(grudev, "address 0x%lx\n", excdet.cb);
gts = gru_find_lock_gts(excdet.cb);
if (!gts)
return -EINVAL;
+ gru_dbg(grudev, "address 0x%lx, gid %d, gts 0x%p\n", excdet.cb, gts->ts_gru ? gts->ts_gru->gs_gid : -1, gts);
ucbnum = get_cb_number((void *)excdet.cb);
if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE) {
ret = -EINVAL;
@@ -617,6 +714,7 @@ int gru_get_exception_detail(unsigned long arg)
cbrnum = thread_cbr_number(gts, ucbnum);
cbe = get_cbe_by_index(gts->ts_gru, cbrnum);
gru_flush_cache(cbe); /* CBE not coherent */
+ sync_core(); /* make sure we have current data */
excdet.opc = cbe->opccpy;
excdet.exopc = cbe->exopccpy;
excdet.ecause = cbe->ecause;
@@ -624,7 +722,7 @@ int gru_get_exception_detail(unsigned long arg)
excdet.exceptdet1 = cbe->idef3upd;
excdet.cbrstate = cbe->cbrstate;
excdet.cbrexecstatus = cbe->cbrexecstatus;
- gru_flush_cache(cbe);
+ gru_flush_cache_cbe(cbe);
ret = 0;
} else {
ret = -EAGAIN;
@@ -733,6 +831,11 @@ long gru_get_gseg_statistics(unsigned long arg)
if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
return -EFAULT;
+ /*
+ * The library creates arrays of contexts for threaded programs.
+ * If no gts exists in the array, the context has never been used & all
+ * statistics are implicitly 0.
+ */
gts = gru_find_lock_gts(req.gseg);
if (gts) {
memcpy(&req.stats, &gts->ustats, sizeof(gts->ustats));
@@ -762,11 +865,25 @@ int gru_set_context_option(unsigned long arg)
return -EFAULT;
gru_dbg(grudev, "op %d, gseg 0x%lx, value1 0x%lx\n", req.op, req.gseg, req.val1);
- gts = gru_alloc_locked_gts(req.gseg);
- if (!gts)
- return -EINVAL;
+ gts = gru_find_lock_gts(req.gseg);
+ if (!gts) {
+ gts = gru_alloc_locked_gts(req.gseg);
+ if (IS_ERR(gts))
+ return PTR_ERR(gts);
+ }
switch (req.op) {
+ case sco_blade_chiplet:
+ /* Select blade/chiplet for GRU context */
+ if (req.val1 < -1 || req.val1 >= GRU_MAX_BLADES || !gru_base[req.val1] ||
+ req.val0 < -1 || req.val0 >= GRU_CHIPLETS_PER_HUB) {
+ ret = -EINVAL;
+ } else {
+ gts->ts_user_blade_id = req.val1;
+ gts->ts_user_chiplet_id = req.val0;
+ gru_check_context_placement(gts);
+ }
+ break;
case sco_gseg_owner:
/* Register the current task as the GSEG owner */
gts->ts_tgid_owner = current->tgid;
diff --git a/drivers/misc/sgi-gru/grufile.c b/drivers/misc/sgi-gru/grufile.c
index ce5eda985ab..cb3b4d22847 100644
--- a/drivers/misc/sgi-gru/grufile.c
+++ b/drivers/misc/sgi-gru/grufile.c
@@ -35,6 +35,9 @@
#include
#include
#include
+#ifdef CONFIG_X86_64
+#include
+#endif
#include
#include "gru.h"
#include "grulib.h"
@@ -130,7 +133,6 @@ static int gru_create_new_context(unsigned long arg)
struct gru_vma_data *vdata;
int ret = -EINVAL;
-
if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
return -EFAULT;
@@ -150,6 +152,7 @@ static int gru_create_new_context(unsigned long arg)
vdata->vd_dsr_au_count =
GRU_DS_BYTES_TO_AU(req.data_segment_bytes);
vdata->vd_cbr_au_count = GRU_CB_COUNT_TO_AU(req.control_blocks);
+ vdata->vd_tlb_preload_count = req.tlb_preload_count;
ret = 0;
}
up_write(&current->mm->mmap_sem);
@@ -190,7 +193,7 @@ static long gru_file_unlocked_ioctl(struct file *file, unsigned int req,
{
int err = -EBADRQC;
- gru_dbg(grudev, "file %p\n", file);
+ gru_dbg(grudev, "file %p, req 0x%x, 0x%lx\n", file, req, arg);
switch (req) {
case GRU_CREATE_CONTEXT:
@@ -232,23 +235,24 @@ static long gru_file_unlocked_ioctl(struct file *file, unsigned int req,
* system.
*/
static void gru_init_chiplet(struct gru_state *gru, unsigned long paddr,
- void *vaddr, int nid, int bid, int grunum)
+ void *vaddr, int blade_id, int chiplet_id)
{
spin_lock_init(&gru->gs_lock);
spin_lock_init(&gru->gs_asid_lock);
gru->gs_gru_base_paddr = paddr;
gru->gs_gru_base_vaddr = vaddr;
- gru->gs_gid = bid * GRU_CHIPLETS_PER_BLADE + grunum;
- gru->gs_blade = gru_base[bid];
- gru->gs_blade_id = bid;
+ gru->gs_gid = blade_id * GRU_CHIPLETS_PER_BLADE + chiplet_id;
+ gru->gs_blade = gru_base[blade_id];
+ gru->gs_blade_id = blade_id;
+ gru->gs_chiplet_id = chiplet_id;
gru->gs_cbr_map = (GRU_CBR_AU == 64) ? ~0 : (1UL << GRU_CBR_AU) - 1;
gru->gs_dsr_map = (1UL << GRU_DSR_AU) - 1;
gru->gs_asid_limit = MAX_ASID;
gru_tgh_flush_init(gru);
if (gru->gs_gid >= gru_max_gids)
gru_max_gids = gru->gs_gid + 1;
- gru_dbg(grudev, "bid %d, nid %d, gid %d, vaddr %p (0x%lx)\n",
- bid, nid, gru->gs_gid, gru->gs_gru_base_vaddr,
+ gru_dbg(grudev, "bid %d, gid %d, vaddr %p (0x%lx)\n",
+ blade_id, gru->gs_gid, gru->gs_gru_base_vaddr,
gru->gs_gru_base_paddr);
}
@@ -264,12 +268,10 @@ static int gru_init_tables(unsigned long gru_base_paddr, void *gru_base_vaddr)
max_user_cbrs = GRU_NUM_CB;
max_user_dsr_bytes = GRU_NUM_DSR_BYTES;
- for_each_online_node(nid) {
- bid = uv_node_to_blade_id(nid);
- pnode = uv_node_to_pnode(nid);
- if (bid < 0 || gru_base[bid])
- continue;
- page = alloc_pages_exact_node(nid, GFP_KERNEL, order);
+ for_each_possible_blade(bid) {
+ pnode = uv_blade_to_pnode(bid);
+ nid = uv_blade_to_memory_nid(bid);/* -1 if no memory on blade */
+ page = alloc_pages_node(nid, GFP_KERNEL, order);
if (!page)
goto fail;
gru_base[bid] = page_address(page);
@@ -285,7 +287,7 @@ static int gru_init_tables(unsigned long gru_base_paddr, void *gru_base_vaddr)
chip++, gru++) {
paddr = gru_chiplet_paddr(gru_base_paddr, pnode, chip);
vaddr = gru_chiplet_vaddr(gru_base_vaddr, pnode, chip);
- gru_init_chiplet(gru, paddr, vaddr, nid, bid, chip);
+ gru_init_chiplet(gru, paddr, vaddr, bid, chip);
n = hweight64(gru->gs_cbr_map) * GRU_CBR_AU_SIZE;
cbrs = max(cbrs, n);
n = hweight64(gru->gs_dsr_map) * GRU_DSR_AU_BYTES;
@@ -298,39 +300,215 @@ static int gru_init_tables(unsigned long gru_base_paddr, void *gru_base_vaddr)
return 0;
fail:
- for (nid--; nid >= 0; nid--)
- free_pages((unsigned long)gru_base[nid], order);
+ for (bid--; bid >= 0; bid--)
+ free_pages((unsigned long)gru_base[bid], order);
return -ENOMEM;
}
+static void gru_free_tables(void)
+{
+ int bid;
+ int order = get_order(sizeof(struct gru_state) *
+ GRU_CHIPLETS_PER_BLADE);
+
+ for (bid = 0; bid < GRU_MAX_BLADES; bid++)
+ free_pages((unsigned long)gru_base[bid], order);
+}
+
+static unsigned long gru_chiplet_cpu_to_mmr(int chiplet, int cpu, int *corep)
+{
+ unsigned long mmr = 0;
+ int core;
+
+ /*
+ * We target the cores of a blade and not the hyperthreads themselves.
+ * There is a max of 8 cores per socket and 2 sockets per blade,
+ * making for a max total of 16 cores (i.e., 16 CPUs without
+ * hyperthreading and 32 CPUs with hyperthreading).
+ */
+ core = uv_cpu_core_number(cpu) + UV_MAX_INT_CORES * uv_cpu_socket_number(cpu);
+ if (core >= GRU_NUM_TFM || uv_cpu_ht_number(cpu))
+ return 0;
+
+ if (chiplet == 0) {
+ mmr = UVH_GR0_TLB_INT0_CONFIG +
+ core * (UVH_GR0_TLB_INT1_CONFIG - UVH_GR0_TLB_INT0_CONFIG);
+ } else if (chiplet == 1) {
+ mmr = UVH_GR1_TLB_INT0_CONFIG +
+ core * (UVH_GR1_TLB_INT1_CONFIG - UVH_GR1_TLB_INT0_CONFIG);
+ } else {
+ BUG();
+ }
+
+ *corep = core;
+ return mmr;
+}
+
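gru_chiplet_cpu_to_mmr() flattens (socket, core) into a single per-blade core index and then selects the chiplet's TLB interrupt MMR by striding from that chiplet's INT0 config register. A minimal standalone sketch of the index/stride arithmetic follows; the constants are made-up stand-ins for UV_MAX_INT_CORES and the UVH_GR0_TLB_INT*_CONFIG addresses, not the real UV values.

/* Sketch of the core-index and MMR-stride computation (hypothetical constants). */
#include <stdio.h>

#define MAX_INT_CORES	8		/* assumed interrupt cores per socket */
#define GR0_INT0_CONFIG	0x61b00UL	/* hypothetical MMR addresses */
#define GR0_INT1_CONFIG	0x61b40UL

int main(void)
{
	int socket = 1, core_in_socket = 3;
	int core = core_in_socket + MAX_INT_CORES * socket;	/* 3 + 8*1 = 11 */
	unsigned long stride = GR0_INT1_CONFIG - GR0_INT0_CONFIG;
	unsigned long mmr = GR0_INT0_CONFIG + core * stride;

	printf("core %d -> mmr 0x%lx\n", core, mmr);	/* core 11 -> 0x61dc0 */
	return 0;
}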
#ifdef CONFIG_IA64
-static int get_base_irq(void)
+static int gru_irq_count[GRU_CHIPLETS_PER_BLADE];
+
+static void gru_noop(unsigned int irq)
{
- return IRQ_GRU;
+}
+
+static struct irq_chip gru_chip[GRU_CHIPLETS_PER_BLADE] = {
+ [0 ... GRU_CHIPLETS_PER_BLADE - 1] {
+ .mask = gru_noop,
+ .unmask = gru_noop,
+ .ack = gru_noop
+ }
+};
+
+static int gru_chiplet_setup_tlb_irq(int chiplet, char *irq_name,
+ irq_handler_t irq_handler, int cpu, int blade)
+{
+ unsigned long mmr;
+ int irq = IRQ_GRU + chiplet;
+ int ret, core;
+
+ mmr = gru_chiplet_cpu_to_mmr(chiplet, cpu, &core);
+ if (mmr == 0)
+ return 0;
+
+ if (gru_irq_count[chiplet] == 0) {
+ gru_chip[chiplet].name = irq_name;
+ ret = set_irq_chip(irq, &gru_chip[chiplet]);
+ if (ret) {
+ printk(KERN_ERR "%s: set_irq_chip failed, errno=%d\n",
+ GRU_DRIVER_ID_STR, -ret);
+ return ret;
+ }
+
+ ret = request_irq(irq, irq_handler, 0, irq_name, NULL);
+ if (ret) {
+ printk(KERN_ERR "%s: request_irq failed, errno=%d\n",
+ GRU_DRIVER_ID_STR, -ret);
+ return ret;
+ }
+ }
+ gru_irq_count[chiplet]++;
+
+ return 0;
+}
+
+static void gru_chiplet_teardown_tlb_irq(int chiplet, int cpu, int blade)
+{
+ unsigned long mmr;
+ int core, irq = IRQ_GRU + chiplet;
+
+ if (gru_irq_count[chiplet] == 0)
+ return;
+
+ mmr = gru_chiplet_cpu_to_mmr(chiplet, cpu, &core);
+ if (mmr == 0)
+ return;
+
+ if (--gru_irq_count[chiplet] == 0)
+ free_irq(irq, NULL);
}
#elif defined CONFIG_X86_64
-static void noop(unsigned int irq)
+static int gru_chiplet_setup_tlb_irq(int chiplet, char *irq_name,
+ irq_handler_t irq_handler, int cpu, int blade)
{
+ unsigned long mmr;
+ int irq, core;
+ int ret;
+
+ mmr = gru_chiplet_cpu_to_mmr(chiplet, cpu, &core);
+ if (mmr == 0)
+ return 0;
+
+ irq = uv_setup_irq(irq_name, cpu, blade, mmr, UV_AFFINITY_CPU);
+ if (irq < 0) {
+ printk(KERN_ERR "%s: uv_setup_irq failed, errno=%d\n",
+ GRU_DRIVER_ID_STR, -irq);
+ return irq;
+ }
+
+ ret = request_irq(irq, irq_handler, 0, irq_name, NULL);
+ if (ret) {
+ uv_teardown_irq(irq);
+ printk(KERN_ERR "%s: request_irq failed, errno=%d\n",
+ GRU_DRIVER_ID_STR, -ret);
+ return ret;
+ }
+ gru_base[blade]->bs_grus[chiplet].gs_irq[core] = irq;
+ return 0;
}
-static struct irq_chip gru_chip = {
- .name = "gru",
- .mask = noop,
- .unmask = noop,
- .ack = noop,
-};
-
-static int get_base_irq(void)
+static void gru_chiplet_teardown_tlb_irq(int chiplet, int cpu, int blade)
{
- set_irq_chip(IRQ_GRU, &gru_chip);
- set_irq_chip(IRQ_GRU + 1, &gru_chip);
- return IRQ_GRU;
+ int irq, core;
+ unsigned long mmr;
+
+ mmr = gru_chiplet_cpu_to_mmr(chiplet, cpu, &core);
+ if (mmr) {
+ irq = gru_base[blade]->bs_grus[chiplet].gs_irq[core];
+ if (irq) {
+ free_irq(irq, NULL);
+ uv_teardown_irq(irq);
+ }
+ }
}
+
#endif
+static void gru_teardown_tlb_irqs(void)
+{
+ int blade;
+ int cpu;
+
+ for_each_online_cpu(cpu) {
+ blade = uv_cpu_to_blade_id(cpu);
+ gru_chiplet_teardown_tlb_irq(0, cpu, blade);
+ gru_chiplet_teardown_tlb_irq(1, cpu, blade);
+ }
+ for_each_possible_blade(blade) {
+ if (uv_blade_nr_possible_cpus(blade))
+ continue;
+ gru_chiplet_teardown_tlb_irq(0, 0, blade);
+ gru_chiplet_teardown_tlb_irq(1, 0, blade);
+ }
+}
+
+static int gru_setup_tlb_irqs(void)
+{
+ int blade;
+ int cpu;
+ int ret;
+
+ for_each_online_cpu(cpu) {
+ blade = uv_cpu_to_blade_id(cpu);
+ ret = gru_chiplet_setup_tlb_irq(0, "GRU0_TLB", gru0_intr, cpu, blade);
+ if (ret != 0)
+ goto exit1;
+
+ ret = gru_chiplet_setup_tlb_irq(1, "GRU1_TLB", gru1_intr, cpu, blade);
+ if (ret != 0)
+ goto exit1;
+ }
+ for_each_possible_blade(blade) {
+ if (uv_blade_nr_possible_cpus(blade))
+ continue;
+ ret = gru_chiplet_setup_tlb_irq(0, "GRU0_TLB", gru_intr_mblade, 0, blade);
+ if (ret != 0)
+ goto exit1;
+
+ ret = gru_chiplet_setup_tlb_irq(1, "GRU1_TLB", gru_intr_mblade, 0, blade);
+ if (ret != 0)
+ goto exit1;
+ }
+
+ return 0;
+
+exit1:
+ gru_teardown_tlb_irqs();
+ return ret;
+}
+
/*
* gru_init
*
@@ -338,8 +516,7 @@ static int get_base_irq(void)
*/
static int __init gru_init(void)
{
- int ret, irq, chip;
- char id[10];
+ int ret;
if (!is_uv_system())
return 0;
@@ -354,41 +531,29 @@ static int __init gru_init(void)
gru_end_paddr = gru_start_paddr + GRU_MAX_BLADES * GRU_SIZE;
printk(KERN_INFO "GRU space: 0x%lx - 0x%lx\n",
gru_start_paddr, gru_end_paddr);
- irq = get_base_irq();
- for (chip = 0; chip < GRU_CHIPLETS_PER_BLADE; chip++) {
- ret = request_irq(irq + chip, gru_intr, 0, id, NULL);
- /* TODO: fix irq handling on x86. For now ignore failure because
- * interrupts are not required & not yet fully supported */
- if (ret) {
- printk(KERN_WARNING
- "!!!WARNING: GRU ignoring request failure!!!\n");
- ret = 0;
- }
- if (ret) {
- printk(KERN_ERR "%s: request_irq failed\n",
- GRU_DRIVER_ID_STR);
- goto exit1;
- }
- }
-
ret = misc_register(&gru_miscdev);
if (ret) {
printk(KERN_ERR "%s: misc_register failed\n",
GRU_DRIVER_ID_STR);
- goto exit1;
+ goto exit0;
}
ret = gru_proc_init();
if (ret) {
printk(KERN_ERR "%s: proc init failed\n", GRU_DRIVER_ID_STR);
- goto exit2;
+ goto exit1;
}
ret = gru_init_tables(gru_start_paddr, gru_start_vaddr);
if (ret) {
printk(KERN_ERR "%s: init tables failed\n", GRU_DRIVER_ID_STR);
- goto exit3;
+ goto exit2;
}
+
+ ret = gru_setup_tlb_irqs();
+ if (ret != 0)
+ goto exit3;
+
gru_kservices_init();
printk(KERN_INFO "%s: v%s\n", GRU_DRIVER_ID_STR,
@@ -396,31 +561,24 @@ static int __init gru_init(void)
return 0;
exit3:
- gru_proc_exit();
+ gru_free_tables();
exit2:
- misc_deregister(&gru_miscdev);
+ gru_proc_exit();
exit1:
- for (--chip; chip >= 0; chip--)
- free_irq(irq + chip, NULL);
+ misc_deregister(&gru_miscdev);
+exit0:
return ret;
}
static void __exit gru_exit(void)
{
- int i, bid;
- int order = get_order(sizeof(struct gru_state) *
- GRU_CHIPLETS_PER_BLADE);
-
if (!is_uv_system())
return;
- for (i = 0; i < GRU_CHIPLETS_PER_BLADE; i++)
- free_irq(IRQ_GRU + i, NULL);
+ gru_teardown_tlb_irqs();
gru_kservices_exit();
- for (bid = 0; bid < GRU_MAX_BLADES; bid++)
- free_pages((unsigned long)gru_base[bid], order);
-
+ gru_free_tables();
misc_deregister(&gru_miscdev);
gru_proc_exit();
}
diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
index 37e7cfc53b9..2f30badc6ff 100644
--- a/drivers/misc/sgi-gru/gruhandles.c
+++ b/drivers/misc/sgi-gru/gruhandles.c
@@ -27,9 +27,11 @@
#ifdef CONFIG_IA64
#include
#define GRU_OPERATION_TIMEOUT (((cycles_t) local_cpu_data->itc_freq)*10)
+#define CLKS2NSEC(c) ((c) *1000000000 / local_cpu_data->itc_freq)
#else
#include
#define GRU_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000)
+#define CLKS2NSEC(c) ((c) * 1000000 / tsc_khz)
#endif
/* Extract the status field from a kernel handle */
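The new CLKS2NSEC() macros convert raw cycle counts into nanoseconds so the mcs_op statistics are reported in a hardware-independent unit. The sketch below walks through the same unit conversion for the x86 case with an assumed tsc_khz; it is an illustration only, not part of the patch.

/* clks -> ns when the clock runs at tsc_khz * 1000 Hz:
 *   ns = clks * 1e9 / (tsc_khz * 1000) = clks * 1000000 / tsc_khz
 */
#include <stdio.h>

int main(void)
{
	unsigned long tsc_khz = 2400000;	/* hypothetical 2.4 GHz TSC */
	unsigned long clks = 4800;		/* measured cycle delta */
	unsigned long nsec = clks * 1000000UL / tsc_khz;

	printf("%lu clks -> %lu ns\n", clks, nsec);	/* 4800 clks -> 2000 ns */
	return 0;
}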
@@ -39,21 +41,39 @@ struct mcs_op_statistic mcs_op_statistics[mcsop_last];
static void update_mcs_stats(enum mcs_op op, unsigned long clks)
{
+ unsigned long nsec;
+
+ nsec = CLKS2NSEC(clks);
atomic_long_inc(&mcs_op_statistics[op].count);
- atomic_long_add(clks, &mcs_op_statistics[op].total);
- if (mcs_op_statistics[op].max < clks)
- mcs_op_statistics[op].max = clks;
+ atomic_long_add(nsec, &mcs_op_statistics[op].total);
+ if (mcs_op_statistics[op].max < nsec)
+ mcs_op_statistics[op].max = nsec;
}
static void start_instruction(void *h)
{
unsigned long *w0 = h;
- wmb(); /* setting CMD bit must be last */
- *w0 = *w0 | 1;
+ wmb(); /* setting CMD/STATUS bits must be last */
+ *w0 = *w0 | 0x20001;
gru_flush_cache(h);
}
+static void report_instruction_timeout(void *h)
+{
+ unsigned long goff = GSEGPOFF((unsigned long)h);
+ char *id = "???";
+
+ if (TYPE_IS(CCH, goff))
+ id = "CCH";
+ else if (TYPE_IS(TGH, goff))
+ id = "TGH";
+ else if (TYPE_IS(TFH, goff))
+ id = "TFH";
+
+ panic(KERN_ALERT "GRU %p (%s) is malfunctioning\n", h, id);
+}
+
static int wait_instruction_complete(void *h, enum mcs_op opc)
{
int status;
@@ -64,9 +84,10 @@ static int wait_instruction_complete(void *h, enum mcs_op opc)
status = GET_MSEG_HANDLE_STATUS(h);
if (status != CCHSTATUS_ACTIVE)
break;
- if (GRU_OPERATION_TIMEOUT < (get_cycles() - start_time))
- panic("GRU %p is malfunctioning: start %ld, end %ld\n",
- h, start_time, (unsigned long)get_cycles());
+ if (GRU_OPERATION_TIMEOUT < (get_cycles() - start_time)) {
+ report_instruction_timeout(h);
+ start_time = get_cycles();
+ }
}
if (gru_options & OPT_STATS)
update_mcs_stats(opc, get_cycles() - start_time);
@@ -75,9 +96,18 @@ static int wait_instruction_complete(void *h, enum mcs_op opc)
int cch_allocate(struct gru_context_configuration_handle *cch)
{
+ int ret;
+
cch->opc = CCHOP_ALLOCATE;
start_instruction(cch);
- return wait_instruction_complete(cch, cchop_allocate);
+ ret = wait_instruction_complete(cch, cchop_allocate);
+
+ /*
+ * Stop speculation into the GSEG being mapped by the previous ALLOCATE.
+ * The GSEG memory does not exist until the ALLOCATE completes.
+ */
+ sync_core();
+ return ret;
}
int cch_start(struct gru_context_configuration_handle *cch)
@@ -96,9 +126,18 @@ int cch_interrupt(struct gru_context_configuration_handle *cch)
int cch_deallocate(struct gru_context_configuration_handle *cch)
{
+ int ret;
+
cch->opc = CCHOP_DEALLOCATE;
start_instruction(cch);
- return wait_instruction_complete(cch, cchop_deallocate);
+ ret = wait_instruction_complete(cch, cchop_deallocate);
+
+ /*
+ * Stop speculation into the GSEG being unmapped by the previous
+ * DEALLOCATE.
+ */
+ sync_core();
+ return ret;
}
int cch_interrupt_sync(struct gru_context_configuration_handle
@@ -126,17 +165,20 @@ int tgh_invalidate(struct gru_tlb_global_handle *tgh,
return wait_instruction_complete(tgh, tghop_invalidate);
}
-void tfh_write_only(struct gru_tlb_fault_handle *tfh,
- unsigned long pfn, unsigned long vaddr,
- int asid, int dirty, int pagesize)
+int tfh_write_only(struct gru_tlb_fault_handle *tfh,
+ unsigned long paddr, int gaa,
+ unsigned long vaddr, int asid, int dirty,
+ int pagesize)
{
tfh->fillasid = asid;
tfh->fillvaddr = vaddr;
- tfh->pfn = pfn;
+ tfh->pfn = paddr >> GRU_PADDR_SHIFT;
+ tfh->gaa = gaa;
tfh->dirty = dirty;
tfh->pagesize = pagesize;
tfh->opc = TFHOP_WRITE_ONLY;
start_instruction(tfh);
+ return wait_instruction_complete(tfh, tfhop_write_only);
}
void tfh_write_restart(struct gru_tlb_fault_handle *tfh,
diff --git a/drivers/misc/sgi-gru/gruhandles.h b/drivers/misc/sgi-gru/gruhandles.h
index f44112242d0..3f998b924d8 100644
--- a/drivers/misc/sgi-gru/gruhandles.h
+++ b/drivers/misc/sgi-gru/gruhandles.h
@@ -91,6 +91,12 @@
/* Convert an arbitrary handle address to the beginning of the GRU segment */
#define GRUBASE(h) ((void *)((unsigned long)(h) & ~(GRU_SIZE - 1)))
+/* Test a valid handle address to determine the type */
+#define TYPE_IS(hn, h) ((h) >= GRU_##hn##_BASE && (h) < \
+ GRU_##hn##_BASE + GRU_NUM_##hn * GRU_HANDLE_STRIDE && \
+ (((h) & (GRU_HANDLE_STRIDE - 1)) == 0))
+
+
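TYPE_IS() classifies a handle address by checking that its offset within the GSEG falls inside the per-type handle region and is aligned to the handle stride. A small standalone sketch of the same range-plus-alignment test is shown below; the base, count, and stride values are assumptions for illustration, not the real GRU layout.

/* Sketch of the TYPE_IS range + alignment check (hypothetical layout values). */
#include <stdio.h>

#define HANDLE_STRIDE	512
#define TFH_BASE	0x1000UL	/* assumed region base  */
#define NUM_TFH		16		/* assumed handle count */

static int is_tfh(unsigned long goff)
{
	return goff >= TFH_BASE &&
	       goff < TFH_BASE + NUM_TFH * HANDLE_STRIDE &&
	       (goff & (HANDLE_STRIDE - 1)) == 0;
}

int main(void)
{
	/* 0x1000: in range, stride aligned -> 1
	 * 0x1100: in range, but not stride aligned -> 0
	 * 0x4000: stride aligned, but past the region -> 0 */
	printf("%d %d %d\n", is_tfh(0x1000), is_tfh(0x1100), is_tfh(0x4000));
	return 0;
}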
/* General addressing macros. */
static inline void *get_gseg_base_address(void *base, int ctxnum)
{
@@ -158,6 +164,16 @@ static inline void *gru_chiplet_vaddr(void *vaddr, int pnode, int chiplet)
return vaddr + GRU_SIZE * (2 * pnode + chiplet);
}
+static inline struct gru_control_block_extended *gru_tfh_to_cbe(
+ struct gru_tlb_fault_handle *tfh)
+{
+ unsigned long cbe;
+
+ cbe = (unsigned long)tfh - GRU_TFH_BASE + GRU_CBE_BASE;
+ return (struct gru_control_block_extended*)cbe;
+}
+
+
/*
@@ -236,6 +252,17 @@ enum gru_tgh_state {
TGHSTATE_RESTART_CTX,
};
+enum gru_tgh_cause {
+ TGHCAUSE_RR_ECC,
+ TGHCAUSE_TLB_ECC,
+ TGHCAUSE_LRU_ECC,
+ TGHCAUSE_PS_ECC,
+ TGHCAUSE_MUL_ERR,
+ TGHCAUSE_DATA_ERR,
+ TGHCAUSE_SW_FORCE
+};
+
+
/*
* TFH - TLB Global Handle
* Used for TLB dropins into the GRU TLB.
@@ -440,6 +467,12 @@ struct gru_control_block_extended {
unsigned int cbrexecstatus:8;
};
+/* CBE fields for active BCOPY instructions */
+#define cbe_baddr0 idef1upd
+#define cbe_baddr1 idef3upd
+#define cbe_src_cl idef6cpy
+#define cbe_nelemcur idef5upd
+
enum gru_cbr_state {
CBRSTATE_INACTIVE,
CBRSTATE_IDLE,
@@ -487,8 +520,8 @@ int cch_interrupt_sync(struct gru_context_configuration_handle *cch);
int tgh_invalidate(struct gru_tlb_global_handle *tgh, unsigned long vaddr,
unsigned long vaddrmask, int asid, int pagesize, int global, int n,
unsigned short ctxbitmap);
-void tfh_write_only(struct gru_tlb_fault_handle *tfh, unsigned long pfn,
- unsigned long vaddr, int asid, int dirty, int pagesize);
+int tfh_write_only(struct gru_tlb_fault_handle *tfh, unsigned long paddr,
+ int gaa, unsigned long vaddr, int asid, int dirty, int pagesize);
void tfh_write_restart(struct gru_tlb_fault_handle *tfh, unsigned long paddr,
int gaa, unsigned long vaddr, int asid, int dirty, int pagesize);
void tfh_restart(struct gru_tlb_fault_handle *tfh);
diff --git a/drivers/misc/sgi-gru/grukdump.c b/drivers/misc/sgi-gru/grukdump.c
index 55eabfa8558..9b2062d1732 100644
--- a/drivers/misc/sgi-gru/grukdump.c
+++ b/drivers/misc/sgi-gru/grukdump.c
@@ -44,7 +44,8 @@ static int gru_user_copy_handle(void __user **dp, void *s)
static int gru_dump_context_data(void *grubase,
struct gru_context_configuration_handle *cch,
- void __user *ubuf, int ctxnum, int dsrcnt)
+ void __user *ubuf, int ctxnum, int dsrcnt,
+ int flush_cbrs)
{
void *cb, *cbe, *tfh, *gseg;
int i, scr;
@@ -55,6 +56,8 @@ static int gru_dump_context_data(void *grubase,
tfh = grubase + GRU_TFH_BASE;
for_each_cbr_in_allocation_map(i, &cch->cbr_allocation_map, scr) {
+ if (flush_cbrs)
+ gru_flush_cache(cb);
if (gru_user_copy_handle(&ubuf, cb))
goto fail;
if (gru_user_copy_handle(&ubuf, tfh + i * GRU_HANDLE_STRIDE))
@@ -115,7 +118,7 @@ fail:
static int gru_dump_context(struct gru_state *gru, int ctxnum,
void __user *ubuf, void __user *ubufend, char data_opt,
- char lock_cch)
+ char lock_cch, char flush_cbrs)
{
struct gru_dump_context_header hdr;
struct gru_dump_context_header __user *uhdr = ubuf;
@@ -159,8 +162,7 @@ static int gru_dump_context(struct gru_state *gru, int ctxnum,
ret = -EFBIG;
else
ret = gru_dump_context_data(grubase, cch, ubuf, ctxnum,
- dsrcnt);
-
+ dsrcnt, flush_cbrs);
}
if (cch_locked)
unlock_cch_handle(cch);
@@ -215,7 +217,8 @@ int gru_dump_chiplet_request(unsigned long arg)
for (ctxnum = 0; ctxnum < GRU_NUM_CCH; ctxnum++) {
if (req.ctxnum == ctxnum || req.ctxnum < 0) {
ret = gru_dump_context(gru, ctxnum, ubuf, ubufend,
- req.data_opt, req.lock_cch);
+ req.data_opt, req.lock_cch,
+ req.flush_cbrs);
if (ret < 0)
goto fail;
ubuf += ret;
diff --git a/drivers/misc/sgi-gru/grukservices.c b/drivers/misc/sgi-gru/grukservices.c
index 766e21e1557..34749ee88df 100644
--- a/drivers/misc/sgi-gru/grukservices.c
+++ b/drivers/misc/sgi-gru/grukservices.c
@@ -31,6 +31,7 @@
#include
#include
#include
+#include
#include "gru.h"
#include "grulib.h"
#include "grutables.h"
@@ -97,9 +98,6 @@
#define ASYNC_HAN_TO_BID(h) ((h) - 1)
#define ASYNC_BID_TO_HAN(b) ((b) + 1)
#define ASYNC_HAN_TO_BS(h) gru_base[ASYNC_HAN_TO_BID(h)]
-#define KCB_TO_GID(cb) ((cb - gru_start_vaddr) / \
- (GRU_SIZE * GRU_CHIPLETS_PER_BLADE))
-#define KCB_TO_BS(cb) gru_base[KCB_TO_GID(cb)]
#define GRU_NUM_KERNEL_CBR 1
#define GRU_NUM_KERNEL_DSR_BYTES 256
@@ -160,8 +158,10 @@ static void gru_load_kernel_context(struct gru_blade_state *bs, int blade_id)
up_read(&bs->bs_kgts_sema);
down_write(&bs->bs_kgts_sema);
- if (!bs->bs_kgts)
- bs->bs_kgts = gru_alloc_gts(NULL, 0, 0, 0, 0);
+ if (!bs->bs_kgts) {
+ bs->bs_kgts = gru_alloc_gts(NULL, 0, 0, 0, 0, 0);
+ bs->bs_kgts->ts_user_blade_id = blade_id;
+ }
kgts = bs->bs_kgts;
if (!kgts->ts_gru) {
@@ -172,9 +172,9 @@ static void gru_load_kernel_context(struct gru_blade_state *bs, int blade_id)
kgts->ts_dsr_au_count = GRU_DS_BYTES_TO_AU(
GRU_NUM_KERNEL_DSR_BYTES * ncpus +
bs->bs_async_dsr_bytes);
- while (!gru_assign_gru_context(kgts, blade_id)) {
+ while (!gru_assign_gru_context(kgts)) {
msleep(1);
- gru_steal_context(kgts, blade_id);
+ gru_steal_context(kgts);
}
gru_load_context(kgts);
gru = bs->bs_kgts->ts_gru;
@@ -200,13 +200,15 @@ static int gru_free_kernel_contexts(void)
bs = gru_base[bid];
if (!bs)
continue;
+
+ /* Ignore busy contexts. Don't want to block here. */
if (down_write_trylock(&bs->bs_kgts_sema)) {
kgts = bs->bs_kgts;
if (kgts && kgts->ts_gru)
gru_unload_context(kgts, 0);
- kfree(kgts);
bs->bs_kgts = NULL;
up_write(&bs->bs_kgts_sema);
+ kfree(kgts);
} else {
ret++;
}
@@ -220,13 +222,21 @@ static int gru_free_kernel_contexts(void)
static struct gru_blade_state *gru_lock_kernel_context(int blade_id)
{
struct gru_blade_state *bs;
+ int bid;
STAT(lock_kernel_context);
- bs = gru_base[blade_id];
+again:
+ bid = blade_id < 0 ? uv_numa_blade_id() : blade_id;
+ bs = gru_base[bid];
+ /* Handle the case where migration occurred while waiting for the sema */
down_read(&bs->bs_kgts_sema);
+ if (blade_id < 0 && bid != uv_numa_blade_id()) {
+ up_read(&bs->bs_kgts_sema);
+ goto again;
+ }
if (!bs->bs_kgts || !bs->bs_kgts->ts_gru)
- gru_load_kernel_context(bs, blade_id);
+ gru_load_kernel_context(bs, bid);
return bs;
}
@@ -255,7 +265,7 @@ static int gru_get_cpu_resources(int dsr_bytes, void **cb, void **dsr)
BUG_ON(dsr_bytes > GRU_NUM_KERNEL_DSR_BYTES);
preempt_disable();
- bs = gru_lock_kernel_context(uv_numa_blade_id());
+ bs = gru_lock_kernel_context(-1);
lcpu = uv_blade_processor_id();
*cb = bs->kernel_cb + lcpu * GRU_HANDLE_STRIDE;
*dsr = bs->kernel_dsr + lcpu * GRU_NUM_KERNEL_DSR_BYTES;
@@ -384,13 +394,31 @@ int gru_get_cb_exception_detail(void *cb,
struct control_block_extended_exc_detail *excdet)
{
struct gru_control_block_extended *cbe;
- struct gru_blade_state *bs;
- int cbrnum;
+ struct gru_thread_state *kgts = NULL;
+ unsigned long off;
+ int cbrnum, bid;
- bs = KCB_TO_BS(cb);
- cbrnum = thread_cbr_number(bs->bs_kgts, get_cb_number(cb));
+ /*
+ * Locate kgts for cb. This algorithm is SLOW but
+ * this function is rarely called (i.e., almost never).
+ * Performance does not matter.
+ */
+ for_each_possible_blade(bid) {
+ if (!gru_base[bid])
+ break;
+ kgts = gru_base[bid]->bs_kgts;
+ if (!kgts || !kgts->ts_gru)
+ continue;
+ off = cb - kgts->ts_gru->gs_gru_base_vaddr;
+ if (off < GRU_SIZE)
+ break;
+ kgts = NULL;
+ }
+ BUG_ON(!kgts);
+ cbrnum = thread_cbr_number(kgts, get_cb_number(cb));
cbe = get_cbe(GRUBASE(cb), cbrnum);
gru_flush_cache(cbe); /* CBE not coherent */
+ sync_core();
excdet->opc = cbe->opccpy;
excdet->exopc = cbe->exopccpy;
excdet->ecause = cbe->ecause;
@@ -409,8 +437,8 @@ char *gru_get_cb_exception_detail_str(int ret, void *cb,
if (ret > 0 && gen->istatus == CBS_EXCEPTION) {
gru_get_cb_exception_detail(cb, &excdet);
snprintf(buf, size,
- "GRU exception: cb %p, opc %d, exopc %d, ecause 0x%x,"
- "excdet0 0x%lx, excdet1 0x%x",
+ "GRU:%d exception: cb %p, opc %d, exopc %d, ecause 0x%x,"
+ "excdet0 0x%lx, excdet1 0x%x", smp_processor_id(),
gen, excdet.opc, excdet.exopc, excdet.ecause,
excdet.exceptdet0, excdet.exceptdet1);
} else {
@@ -457,9 +485,10 @@ int gru_check_status_proc(void *cb)
int ret;
ret = gen->istatus;
- if (ret != CBS_EXCEPTION)
- return ret;
- return gru_retry_exception(cb);
+ if (ret == CBS_EXCEPTION)
+ ret = gru_retry_exception(cb);
+ rmb();
+ return ret;
}
@@ -471,7 +500,7 @@ int gru_wait_proc(void *cb)
ret = gru_wait_idle_or_exception(gen);
if (ret == CBS_EXCEPTION)
ret = gru_retry_exception(cb);
-
+ rmb();
return ret;
}
@@ -538,7 +567,7 @@ int gru_create_message_queue(struct gru_message_queue_desc *mqd,
mqd->mq = mq;
mqd->mq_gpa = uv_gpa(mq);
mqd->qlines = qlines;
- mqd->interrupt_pnode = UV_NASID_TO_PNODE(nasid);
+ mqd->interrupt_pnode = nasid >> 1;
mqd->interrupt_vector = vector;
mqd->interrupt_apicid = apicid;
return 0;
@@ -598,6 +627,8 @@ static int send_noop_message(void *cb, struct gru_message_queue_desc *mqd,
ret = MQE_UNEXPECTED_CB_ERR;
break;
case CBSS_PAGE_OVERFLOW:
+ STAT(mesq_noop_page_overflow);
+ /* fallthru */
default:
BUG();
}
@@ -672,18 +703,6 @@ cberr:
return MQE_UNEXPECTED_CB_ERR;
}
-/*
- * Send a cross-partition interrupt to the SSI that contains the target
- * message queue. Normally, the interrupt is automatically delivered by hardware
- * but some error conditions require explicit delivery.
- */
-static void send_message_queue_interrupt(struct gru_message_queue_desc *mqd)
-{
- if (mqd->interrupt_vector)
- uv_hub_send_ipi(mqd->interrupt_pnode, mqd->interrupt_apicid,
- mqd->interrupt_vector);
-}
-
/*
* Handle a PUT failure. Note: if message was a 2-line message, one of the
* lines might have successfully have been written. Before sending the
@@ -693,7 +712,8 @@ static void send_message_queue_interrupt(struct gru_message_queue_desc *mqd)
static int send_message_put_nacked(void *cb, struct gru_message_queue_desc *mqd,
void *mesg, int lines)
{
- unsigned long m;
+ unsigned long m, *val = mesg, gpa, save;
+ int ret;
m = mqd->mq_gpa + (gru_get_amo_value_head(cb) << 6);
if (lines == 2) {
@@ -704,7 +724,26 @@ static int send_message_put_nacked(void *cb, struct gru_message_queue_desc *mqd,
gru_vstore(cb, m, gru_get_tri(mesg), XTYPE_CL, lines, 1, IMA);
if (gru_wait(cb) != CBS_IDLE)
return MQE_UNEXPECTED_CB_ERR;
- send_message_queue_interrupt(mqd);
+
+ if (!mqd->interrupt_vector)
+ return MQE_OK;
+
+ /*
+ * Send a cross-partition interrupt to the SSI that contains the target
+ * message queue. Normally, the interrupt is automatically delivered by
+ * hardware but some error conditions require explicit delivery.
+ * Use the GRU to deliver the interrupt. Otherwise partition failures
+ * could cause unrecovered errors.
+ */
+ gpa = uv_global_gru_mmr_address(mqd->interrupt_pnode, UVH_IPI_INT);
+ save = *val;
+ *val = uv_hub_ipi_value(mqd->interrupt_apicid, mqd->interrupt_vector,
+ dest_Fixed);
+ gru_vstore_phys(cb, gpa, gru_get_tri(mesg), IAA_REGISTER, IMA);
+ ret = gru_wait(cb);
+ *val = save;
+ if (ret != CBS_IDLE)
+ return MQE_UNEXPECTED_CB_ERR;
return MQE_OK;
}
@@ -739,6 +778,9 @@ static int send_message_failure(void *cb, struct gru_message_queue_desc *mqd,
STAT(mesq_send_put_nacked);
ret = send_message_put_nacked(cb, mqd, mesg, lines);
break;
+ case CBSS_PAGE_OVERFLOW:
+ STAT(mesq_page_overflow);
+ /* fallthru */
default:
BUG();
}
@@ -831,7 +873,6 @@ void *gru_get_next_message(struct gru_message_queue_desc *mqd)
int present = mhdr->present;
/* skip NOOP messages */
- STAT(mesq_receive);
while (present == MQS_NOOP) {
gru_free_message(mqd, mhdr);
mhdr = mq->next;
@@ -851,12 +892,36 @@ void *gru_get_next_message(struct gru_message_queue_desc *mqd)
if (mhdr->lines == 2)
restore_present2(mhdr, mhdr->present2);
+ STAT(mesq_receive);
return mhdr;
}
EXPORT_SYMBOL_GPL(gru_get_next_message);
/* ---------------------- GRU DATA COPY FUNCTIONS ---------------------------*/
+/*
+ * Load a DW from a global GPA. The GPA can be a memory or MMR address.
+ */
+int gru_read_gpa(unsigned long *value, unsigned long gpa)
+{
+ void *cb;
+ void *dsr;
+ int ret, iaa;
+
+ STAT(read_gpa);
+ if (gru_get_cpu_resources(GRU_NUM_KERNEL_DSR_BYTES, &cb, &dsr))
+ return MQE_BUG_NO_RESOURCES;
+ iaa = gpa >> 62;
+ gru_vload_phys(cb, gpa, gru_get_tri(dsr), iaa, IMA);
+ ret = gru_wait(cb);
+ if (ret == CBS_IDLE)
+ *value = *(unsigned long *)dsr;
+ gru_free_cpu_resources(cb, dsr);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(gru_read_gpa);
+
+
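gru_read_gpa() derives the IAA for the vload from the top two bits of the global address (iaa = gpa >> 62), which presumably encodes whether the GPA targets ordinary memory or MMR/register space. The fragment below only illustrates that bit extraction; the example address and the meaning of the resulting value are assumptions, not taken from the GRU headers.

/* Sketch: extract the 2-bit address attribute from bits 63:62 of a GPA. */
#include <stdio.h>

int main(void)
{
	unsigned long gpa = (1UL << 63) | 0x1234000UL;	/* hypothetical gpa with bit 63 set */
	int iaa = gpa >> 62;				/* top two bits -> 2 */

	printf("gpa 0x%lx -> iaa %d\n", gpa, iaa);
	return 0;
}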
/*
* Copy a block of data using the GRU resources
*/
@@ -898,24 +963,24 @@ static int quicktest0(unsigned long arg)
gru_vload(cb, uv_gpa(&word0), gru_get_tri(dsr), XTYPE_DW, 1, 1, IMA);
if (gru_wait(cb) != CBS_IDLE) {
- printk(KERN_DEBUG "GRU quicktest0: CBR failure 1\n");
+ printk(KERN_DEBUG "GRU:%d quicktest0: CBR failure 1\n", smp_processor_id());
goto done;
}
if (*p != MAGIC) {
- printk(KERN_DEBUG "GRU: quicktest0 bad magic 0x%lx\n", *p);
+ printk(KERN_DEBUG "GRU:%d quicktest0 bad magic 0x%lx\n", smp_processor_id(), *p);
goto done;
}
gru_vstore(cb, uv_gpa(&word1), gru_get_tri(dsr), XTYPE_DW, 1, 1, IMA);
if (gru_wait(cb) != CBS_IDLE) {
- printk(KERN_DEBUG "GRU quicktest0: CBR failure 2\n");
+ printk(KERN_DEBUG "GRU:%d quicktest0: CBR failure 2\n", smp_processor_id());
goto done;
}
if (word0 != word1 || word1 != MAGIC) {
printk(KERN_DEBUG
- "GRU quicktest0 err: found 0x%lx, expected 0x%lx\n",
- word1, MAGIC);
+ "GRU:%d quicktest0 err: found 0x%lx, expected 0x%lx\n",
+ smp_processor_id(), word1, MAGIC);
goto done;
}
ret = 0;
@@ -952,8 +1017,11 @@ static int quicktest1(unsigned long arg)
if (ret)
break;
}
- if (ret != MQE_QUEUE_FULL || i != 4)
+ if (ret != MQE_QUEUE_FULL || i != 4) {
+ printk(KERN_DEBUG "GRU:%d quicktest1: unexpect status %d, i %d\n",
+ smp_processor_id(), ret, i);
goto done;
+ }
for (i = 0; i < 6; i++) {
m = gru_get_next_message(&mqd);
@@ -961,7 +1029,12 @@ static int quicktest1(unsigned long arg)
break;
gru_free_message(&mqd, m);
}
- ret = (i == 4) ? 0 : -EIO;
+ if (i != 4) {
+ printk(KERN_DEBUG "GRU:%d quicktest2: bad message, i %d, m %p, m8 %d\n",
+ smp_processor_id(), i, m, m ? m[8] : -1);
+ goto done;
+ }
+ ret = 0;
done:
kfree(p);
@@ -977,6 +1050,7 @@ static int quicktest2(unsigned long arg)
int ret = 0;
unsigned long *buf;
void *cb0, *cb;
+ struct gru_control_block_status *gen;
int i, k, istatus, bytes;
bytes = numcb * 4 * 8;
@@ -996,20 +1070,30 @@ static int quicktest2(unsigned long arg)
XTYPE_DW, 4, 1, IMA_INTERRUPT);
ret = 0;
- for (k = 0; k < numcb; k++) {
+ k = numcb;
+ do {
gru_wait_async_cbr(han);
for (i = 0; i < numcb; i++) {
cb = cb0 + i * GRU_HANDLE_STRIDE;
istatus = gru_check_status(cb);
- if (istatus == CBS_ACTIVE)
- continue;
- if (istatus == CBS_EXCEPTION)
- ret = -EFAULT;
- else if (buf[i] || buf[i + 1] || buf[i + 2] ||
- buf[i + 3])
- ret = -EIO;
+ if (istatus != CBS_ACTIVE && istatus != CBS_CALL_OS)
+ break;
}
- }
+ if (i == numcb)
+ continue;
+ if (istatus != CBS_IDLE) {
+ printk(KERN_DEBUG "GRU:%d quicktest2: cb %d, exception\n", smp_processor_id(), i);
+ ret = -EFAULT;
+ } else if (buf[4 * i] || buf[4 * i + 1] || buf[4 * i + 2] ||
+ buf[4 * i + 3]) {
+ printk(KERN_DEBUG "GRU:%d quicktest2:cb %d, buf 0x%lx, 0x%lx, 0x%lx, 0x%lx\n",
+ smp_processor_id(), i, buf[4 * i], buf[4 * i + 1], buf[4 * i + 2], buf[4 * i + 3]);
+ ret = -EIO;
+ }
+ k--;
+ gen = cb;
+ gen->istatus = CBS_CALL_OS; /* don't handle this CBR again */
+ } while (k);
BUG_ON(cmp.done);
gru_unlock_async_resource(han);
@@ -1019,6 +1103,22 @@ done:
return ret;
}
+#define BUFSIZE 200
+static int quicktest3(unsigned long arg)
+{
+ char buf1[BUFSIZE], buf2[BUFSIZE];
+ int ret = 0;
+
+ memset(buf2, 0, sizeof(buf2));
+ memset(buf1, get_cycles() & 255, sizeof(buf1));
+ gru_copy_gpa(uv_gpa(buf2), uv_gpa(buf1), BUFSIZE);
+ if (memcmp(buf1, buf2, BUFSIZE)) {
+ printk(KERN_DEBUG "GRU:%d quicktest3 error\n", smp_processor_id());
+ ret = -EIO;
+ }
+ return ret;
+}
+
/*
* Debugging only. User hook for various kernel tests
* of driver & gru.
@@ -1037,6 +1137,9 @@ int gru_ktest(unsigned long arg)
case 2:
ret = quicktest2(arg);
break;
+ case 3:
+ ret = quicktest3(arg);
+ break;
case 99:
ret = gru_free_kernel_contexts();
break;
diff --git a/drivers/misc/sgi-gru/grukservices.h b/drivers/misc/sgi-gru/grukservices.h
index d60d34bca44..02aa94d8484 100644
--- a/drivers/misc/sgi-gru/grukservices.h
+++ b/drivers/misc/sgi-gru/grukservices.h
@@ -130,6 +130,20 @@ extern void gru_free_message(struct gru_message_queue_desc *mqd,
extern void *gru_get_next_message(struct gru_message_queue_desc *mqd);
+/*
+ * Read a GRU global GPA. Source can be located in a remote partition.
+ *
+ * Input:
+ * value memory address where MMR value is returned
+ * gpa source numalink physical address of GPA
+ *
+ * Output:
+ * 0 OK
+ * >0 error
+ */
+int gru_read_gpa(unsigned long *value, unsigned long gpa);
+
+
/*
* Copy data using the GRU. Source or destination can be located in a remote
* partition.
diff --git a/drivers/misc/sgi-gru/grulib.h b/drivers/misc/sgi-gru/grulib.h
index 889bc442a3e..e77d1b1f9d0 100644
--- a/drivers/misc/sgi-gru/grulib.h
+++ b/drivers/misc/sgi-gru/grulib.h
@@ -63,18 +63,9 @@
#define THREAD_POINTER(p, th) (p + GRU_GSEG_PAGESIZE * (th))
#define GSEG_START(cb) ((void *)((unsigned long)(cb) & ~(GRU_GSEG_PAGESIZE - 1)))
-/*
- * Statictics kept on a per-GTS basis.
- */
-struct gts_statistics {
- unsigned long fmm_tlbdropin;
- unsigned long upm_tlbdropin;
- unsigned long context_stolen;
-};
-
struct gru_get_gseg_statistics_req {
- unsigned long gseg;
- struct gts_statistics stats;
+ unsigned long gseg;
+ struct gru_gseg_statistics stats;
};
/*
@@ -86,6 +77,7 @@ struct gru_create_context_req {
unsigned int control_blocks;
unsigned int maximum_thread_count;
unsigned int options;
+ unsigned char tlb_preload_count;
};
/*
@@ -98,11 +90,12 @@ struct gru_unload_context_req {
/*
* Structure used to set context options
*/
-enum {sco_gseg_owner, sco_cch_req_slice};
+enum {sco_gseg_owner, sco_cch_req_slice, sco_blade_chiplet};
struct gru_set_context_option_req {
unsigned long gseg;
int op;
- unsigned long val1;
+ int val0;
+ long val1;
};
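With the new sco_blade_chiplet option, val1 carries the requested blade id (or -1 for the local blade) and val0 the chiplet within it (or -1 for any chiplet). A hedged user-side sketch is shown below; the userspace header name and the GRU_SET_CONTEXT_OPTION ioctl request are assumptions here, while the struct and enum mirror the definitions above.

/* Hypothetical user-side sketch (not part of the patch): pin a GSEG to
 * blade 2, chiplet 1 via sco_blade_chiplet. */
#include <stdio.h>
#include <sys/ioctl.h>
#include "grulib.h"		/* assumed to provide the struct and ioctl number */

static int pin_gseg(int gru_fd, void *gseg_vaddr)
{
	struct gru_set_context_option_req req = {
		.gseg = (unsigned long)gseg_vaddr,
		.op   = sco_blade_chiplet,
		.val0 = 1,	/* chiplet id, or -1 for any chiplet on the blade */
		.val1 = 2,	/* blade id, or -1 for the local blade */
	};

	if (ioctl(gru_fd, GRU_SET_CONTEXT_OPTION, &req) < 0) {
		perror("GRU_SET_CONTEXT_OPTION");
		return -1;
	}
	return 0;
}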
/*
@@ -124,6 +117,8 @@ struct gru_dump_chiplet_state_req {
int ctxnum;
char data_opt;
char lock_cch;
+ char flush_cbrs;
+ char fill[10];
pid_t pid;
void *buf;
size_t buflen;
diff --git a/drivers/misc/sgi-gru/grumain.c b/drivers/misc/sgi-gru/grumain.c
index 3bc643dad60..f8538bbd0bf 100644
--- a/drivers/misc/sgi-gru/grumain.c
+++ b/drivers/misc/sgi-gru/grumain.c
@@ -27,6 +27,7 @@
#include
#include
#include
+#include
#include
#include "gru.h"
#include "grutables.h"
@@ -48,12 +49,20 @@ struct device *grudev = &gru_device;
/*
* Select a gru fault map to be used by the current cpu. Note that
* multiple cpus may be using the same map.
- * ZZZ should "shift" be used?? Depends on HT cpu numbering
* ZZZ should be inline but did not work on emulator
*/
int gru_cpu_fault_map_id(void)
{
+#ifdef CONFIG_IA64
return uv_blade_processor_id() % GRU_NUM_TFM;
+#else
+ int cpu = smp_processor_id();
+ int id, core;
+
+ core = uv_cpu_core_number(cpu);
+ id = core + UV_MAX_INT_CORES * uv_cpu_socket_number(cpu);
+ return id;
+#endif
}
/*--------- ASID Management -------------------------------------------
@@ -286,7 +295,8 @@ static void gru_unload_mm_tracker(struct gru_state *gru,
void gts_drop(struct gru_thread_state *gts)
{
if (gts && atomic_dec_return(&gts->ts_refcnt) == 0) {
- gru_drop_mmu_notifier(gts->ts_gms);
+ if (gts->ts_gms)
+ gru_drop_mmu_notifier(gts->ts_gms);
kfree(gts);
STAT(gts_free);
}
@@ -310,16 +320,18 @@ static struct gru_thread_state *gru_find_current_gts_nolock(struct gru_vma_data
* Allocate a thread state structure.
*/
struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma,
- int cbr_au_count, int dsr_au_count, int options, int tsid)
+ int cbr_au_count, int dsr_au_count,
+ unsigned char tlb_preload_count, int options, int tsid)
{
struct gru_thread_state *gts;
+ struct gru_mm_struct *gms;
int bytes;
bytes = DSR_BYTES(dsr_au_count) + CBR_BYTES(cbr_au_count);
bytes += sizeof(struct gru_thread_state);
gts = kmalloc(bytes, GFP_KERNEL);
if (!gts)
- return NULL;
+ return ERR_PTR(-ENOMEM);
STAT(gts_alloc);
memset(gts, 0, sizeof(struct gru_thread_state)); /* zero out header */
@@ -327,7 +339,10 @@ struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma,
mutex_init(&gts->ts_ctxlock);
gts->ts_cbr_au_count = cbr_au_count;
gts->ts_dsr_au_count = dsr_au_count;
+ gts->ts_tlb_preload_count = tlb_preload_count;
gts->ts_user_options = options;
+ gts->ts_user_blade_id = -1;
+ gts->ts_user_chiplet_id = -1;
gts->ts_tsid = tsid;
gts->ts_ctxnum = NULLCTX;
gts->ts_tlb_int_select = -1;
@@ -336,9 +351,10 @@ struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma,
if (vma) {
gts->ts_mm = current->mm;
gts->ts_vma = vma;
- gts->ts_gms = gru_register_mmu_notifier();
- if (!gts->ts_gms)
+ gms = gru_register_mmu_notifier();
+ if (IS_ERR(gms))
goto err;
+ gts->ts_gms = gms;
}
gru_dbg(grudev, "alloc gts %p\n", gts);
@@ -346,7 +362,7 @@ struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma,
err:
gts_drop(gts);
- return NULL;
+ return ERR_CAST(gms);
}
/*
@@ -360,6 +376,7 @@ struct gru_vma_data *gru_alloc_vma_data(struct vm_area_struct *vma, int tsid)
if (!vdata)
return NULL;
+ STAT(vdata_alloc);
INIT_LIST_HEAD(&vdata->vd_head);
spin_lock_init(&vdata->vd_lock);
gru_dbg(grudev, "alloc vdata %p\n", vdata);
@@ -392,10 +409,12 @@ struct gru_thread_state *gru_alloc_thread_state(struct vm_area_struct *vma,
struct gru_vma_data *vdata = vma->vm_private_data;
struct gru_thread_state *gts, *ngts;
- gts = gru_alloc_gts(vma, vdata->vd_cbr_au_count, vdata->vd_dsr_au_count,
+ gts = gru_alloc_gts(vma, vdata->vd_cbr_au_count,
+ vdata->vd_dsr_au_count,
+ vdata->vd_tlb_preload_count,
vdata->vd_user_options, tsid);
- if (!gts)
- return NULL;
+ if (IS_ERR(gts))
+ return gts;
spin_lock(&vdata->vd_lock);
ngts = gru_find_current_gts_nolock(vdata, tsid);
@@ -493,6 +512,9 @@ static void gru_load_context_data(void *save, void *grubase, int ctxnum,
memset(cbe + i * GRU_HANDLE_STRIDE, 0,
GRU_CACHE_LINE_BYTES);
}
+ /* Flush CBE to hide race in context restart */
+ mb();
+ gru_flush_cache(cbe + i * GRU_HANDLE_STRIDE);
cb += GRU_HANDLE_STRIDE;
}
@@ -513,6 +535,12 @@ static void gru_unload_context_data(void *save, void *grubase, int ctxnum,
cb = gseg + GRU_CB_BASE;
cbe = grubase + GRU_CBE_BASE;
length = hweight64(dsrmap) * GRU_DSR_AU_BYTES;
+
+ /* CBEs may not be coherent. Flush them from cache */
+ for_each_cbr_in_allocation_map(i, &cbrmap, scr)
+ gru_flush_cache(cbe + i * GRU_HANDLE_STRIDE);
+ mb(); /* Let the CL flush complete */
+
gru_prefetch_context(gseg, cb, cbe, cbrmap, length);
for_each_cbr_in_allocation_map(i, &cbrmap, scr) {
@@ -533,7 +561,8 @@ void gru_unload_context(struct gru_thread_state *gts, int savestate)
zap_vma_ptes(gts->ts_vma, UGRUADDR(gts), GRU_GSEG_PAGESIZE);
cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);
- gru_dbg(grudev, "gts %p\n", gts);
+ gru_dbg(grudev, "gts %p, cbrmap 0x%lx, dsrmap 0x%lx\n",
+ gts, gts->ts_cbr_map, gts->ts_dsr_map);
lock_cch_handle(cch);
if (cch_interrupt_sync(cch))
BUG();
@@ -549,7 +578,6 @@ void gru_unload_context(struct gru_thread_state *gts, int savestate)
if (cch_deallocate(cch))
BUG();
- gts->ts_force_unload = 0; /* ts_force_unload locked by CCH lock */
unlock_cch_handle(cch);
gru_free_gru_context(gts);
@@ -565,9 +593,7 @@ void gru_load_context(struct gru_thread_state *gts)
struct gru_context_configuration_handle *cch;
int i, err, asid, ctxnum = gts->ts_ctxnum;
- gru_dbg(grudev, "gts %p\n", gts);
cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);
-
lock_cch_handle(cch);
cch->tfm_fault_bit_enable =
(gts->ts_user_options == GRU_OPT_MISS_FMM_POLL
@@ -591,6 +617,7 @@ void gru_load_context(struct gru_thread_state *gts)
cch->unmap_enable = 1;
cch->tfm_done_bit_enable = 1;
cch->cb_int_enable = 1;
+ cch->tlb_int_select = 0; /* For now, ints go to cpu 0 */
} else {
cch->unmap_enable = 0;
cch->tfm_done_bit_enable = 0;
@@ -616,17 +643,18 @@ void gru_load_context(struct gru_thread_state *gts)
if (cch_start(cch))
BUG();
unlock_cch_handle(cch);
+
+ gru_dbg(grudev, "gid %d, gts %p, cbrmap 0x%lx, dsrmap 0x%lx, tie %d, tis %d\n",
+ gts->ts_gru->gs_gid, gts, gts->ts_cbr_map, gts->ts_dsr_map,
+ (gts->ts_user_options == GRU_OPT_MISS_FMM_INTR), gts->ts_tlb_int_select);
}
/*
* Update fields in an active CCH:
* - retarget interrupts on local blade
* - update sizeavail mask
- * - force a delayed context unload by clearing the CCH asids. This
- * forces TLB misses for new GRU instructions. The context is unloaded
- * when the next TLB miss occurs.
*/
-int gru_update_cch(struct gru_thread_state *gts, int force_unload)
+int gru_update_cch(struct gru_thread_state *gts)
{
struct gru_context_configuration_handle *cch;
struct gru_state *gru = gts->ts_gru;
@@ -640,21 +668,13 @@ int gru_update_cch(struct gru_thread_state *gts, int force_unload)
goto exit;
if (cch_interrupt(cch))
BUG();
- if (!force_unload) {
- for (i = 0; i < 8; i++)
- cch->sizeavail[i] = gts->ts_sizeavail;
- gts->ts_tlb_int_select = gru_cpu_fault_map_id();
- cch->tlb_int_select = gru_cpu_fault_map_id();
- cch->tfm_fault_bit_enable =
- (gts->ts_user_options == GRU_OPT_MISS_FMM_POLL
- || gts->ts_user_options == GRU_OPT_MISS_FMM_INTR);
- } else {
- for (i = 0; i < 8; i++)
- cch->asid[i] = 0;
- cch->tfm_fault_bit_enable = 0;
- cch->tlb_int_enable = 0;
- gts->ts_force_unload = 1;
- }
+ for (i = 0; i < 8; i++)
+ cch->sizeavail[i] = gts->ts_sizeavail;
+ gts->ts_tlb_int_select = gru_cpu_fault_map_id();
+ cch->tlb_int_select = gru_cpu_fault_map_id();
+ cch->tfm_fault_bit_enable =
+ (gts->ts_user_options == GRU_OPT_MISS_FMM_POLL
+ || gts->ts_user_options == GRU_OPT_MISS_FMM_INTR);
if (cch_start(cch))
BUG();
ret = 1;
@@ -679,7 +699,54 @@ static int gru_retarget_intr(struct gru_thread_state *gts)
gru_dbg(grudev, "retarget from %d to %d\n", gts->ts_tlb_int_select,
gru_cpu_fault_map_id());
- return gru_update_cch(gts, 0);
+ return gru_update_cch(gts);
+}
+
+/*
+ * Check if a GRU context is allowed to use a specific chiplet. By default
+ * a context is assigned to any blade-local chiplet. However, users can
+ * override this.
+ * Returns 1 if assignment allowed, 0 otherwise
+ */
+static int gru_check_chiplet_assignment(struct gru_state *gru,
+ struct gru_thread_state *gts)
+{
+ int blade_id;
+ int chiplet_id;
+
+ blade_id = gts->ts_user_blade_id;
+ if (blade_id < 0)
+ blade_id = uv_numa_blade_id();
+
+ chiplet_id = gts->ts_user_chiplet_id;
+ return gru->gs_blade_id == blade_id &&
+ (chiplet_id < 0 || chiplet_id == gru->gs_chiplet_id);
+}
+
+/*
+ * Unload the gru context if it is not assigned to the correct blade or
+ * chiplet. Misassignment can occur if the process migrates to a different
+ * blade or if the user changes the selected blade/chiplet.
+ */
+void gru_check_context_placement(struct gru_thread_state *gts)
+{
+ struct gru_state *gru;
+
+ /*
+ * If the current task is the context owner, verify that the
+ * context is correctly placed. This test is skipped for non-owner
+ * references. Pthread apps use non-owner references to the CBRs.
+ */
+ gru = gts->ts_gru;
+ if (!gru || gts->ts_tgid_owner != current->tgid)
+ return;
+
+ if (!gru_check_chiplet_assignment(gru, gts)) {
+ STAT(check_context_unload);
+ gru_unload_context(gts, 1);
+ } else if (gru_retarget_intr(gts)) {
+ STAT(check_context_retarget_intr);
+ }
}
@@ -712,13 +779,17 @@ static void gts_stolen(struct gru_thread_state *gts,
}
}
-void gru_steal_context(struct gru_thread_state *gts, int blade_id)
+void gru_steal_context(struct gru_thread_state *gts)
{
struct gru_blade_state *blade;
struct gru_state *gru, *gru0;
struct gru_thread_state *ngts = NULL;
int ctxnum, ctxnum0, flag = 0, cbr, dsr;
+ int blade_id;
+ blade_id = gts->ts_user_blade_id;
+ if (blade_id < 0)
+ blade_id = uv_numa_blade_id();
cbr = gts->ts_cbr_au_count;
dsr = gts->ts_dsr_au_count;
@@ -729,35 +800,39 @@ void gru_steal_context(struct gru_thread_state *gts, int blade_id)
gru = blade->bs_lru_gru;
if (ctxnum == 0)
gru = next_gru(blade, gru);
+ blade->bs_lru_gru = gru;
+ blade->bs_lru_ctxnum = ctxnum;
ctxnum0 = ctxnum;
gru0 = gru;
while (1) {
- if (check_gru_resources(gru, cbr, dsr, GRU_NUM_CCH))
- break;
- spin_lock(&gru->gs_lock);
- for (; ctxnum < GRU_NUM_CCH; ctxnum++) {
- if (flag && gru == gru0 && ctxnum == ctxnum0)
+ if (gru_check_chiplet_assignment(gru, gts)) {
+ if (check_gru_resources(gru, cbr, dsr, GRU_NUM_CCH))
break;
- ngts = gru->gs_gts[ctxnum];
- /*
- * We are grabbing locks out of order, so trylock is
- * needed. GTSs are usually not locked, so the odds of
- * success are high. If trylock fails, try to steal a
- * different GSEG.
- */
- if (ngts && is_gts_stealable(ngts, blade))
+ spin_lock(&gru->gs_lock);
+ for (; ctxnum < GRU_NUM_CCH; ctxnum++) {
+ if (flag && gru == gru0 && ctxnum == ctxnum0)
+ break;
+ ngts = gru->gs_gts[ctxnum];
+ /*
+ * We are grabbing locks out of order, so trylock is
+ * needed. GTSs are usually not locked, so the odds of
+ * success are high. If trylock fails, try to steal a
+ * different GSEG.
+ */
+ if (ngts && is_gts_stealable(ngts, blade))
+ break;
+ ngts = NULL;
+ }
+ spin_unlock(&gru->gs_lock);
+ if (ngts || (flag && gru == gru0 && ctxnum == ctxnum0))
break;
- ngts = NULL;
- flag = 1;
}
- spin_unlock(&gru->gs_lock);
- if (ngts || (flag && gru == gru0 && ctxnum == ctxnum0))
+ if (flag && gru == gru0)
break;
+ flag = 1;
ctxnum = 0;
gru = next_gru(blade, gru);
}
- blade->bs_lru_gru = gru;
- blade->bs_lru_ctxnum = ctxnum;
spin_unlock(&blade->bs_lock);
if (ngts) {
@@ -775,20 +850,35 @@ void gru_steal_context(struct gru_thread_state *gts, int blade_id)
hweight64(gru->gs_dsr_map));
}
+/*
+ * Assign a gru context.
+ */
+static int gru_assign_context_number(struct gru_state *gru)
+{
+ int ctxnum;
+
+ ctxnum = find_first_zero_bit(&gru->gs_context_map, GRU_NUM_CCH);
+ __set_bit(ctxnum, &gru->gs_context_map);
+ return ctxnum;
+}
+
/*
* Scan the GRUs on the local blade & assign a GRU context.
*/
-struct gru_state *gru_assign_gru_context(struct gru_thread_state *gts,
- int blade)
+struct gru_state *gru_assign_gru_context(struct gru_thread_state *gts)
{
struct gru_state *gru, *grux;
int i, max_active_contexts;
+ int blade_id = gts->ts_user_blade_id;
-
+ if (blade_id < 0)
+ blade_id = uv_numa_blade_id();
again:
gru = NULL;
max_active_contexts = GRU_NUM_CCH;
- for_each_gru_on_blade(grux, blade, i) {
+ for_each_gru_on_blade(grux, blade_id, i) {
+ if (!gru_check_chiplet_assignment(grux, gts))
+ continue;
if (check_gru_resources(grux, gts->ts_cbr_au_count,
gts->ts_dsr_au_count,
max_active_contexts)) {
@@ -809,12 +899,9 @@ again:
reserve_gru_resources(gru, gts);
gts->ts_gru = gru;
gts->ts_blade = gru->gs_blade_id;
- gts->ts_ctxnum =
- find_first_zero_bit(&gru->gs_context_map, GRU_NUM_CCH);
- BUG_ON(gts->ts_ctxnum == GRU_NUM_CCH);
+ gts->ts_ctxnum = gru_assign_context_number(gru);
atomic_inc(&gts->ts_refcnt);
gru->gs_gts[gts->ts_ctxnum] = gts;
- __set_bit(gts->ts_ctxnum, &gru->gs_context_map);
spin_unlock(&gru->gs_lock);
STAT(assign_context);
@@ -842,7 +929,6 @@ int gru_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct gru_thread_state *gts;
unsigned long paddr, vaddr;
- int blade_id;
vaddr = (unsigned long)vmf->virtual_address;
gru_dbg(grudev, "vma %p, vaddr 0x%lx (0x%lx)\n",
@@ -857,28 +943,18 @@ int gru_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
again:
mutex_lock(&gts->ts_ctxlock);
preempt_disable();
- blade_id = uv_numa_blade_id();
- if (gts->ts_gru) {
- if (gts->ts_gru->gs_blade_id != blade_id) {
- STAT(migrated_nopfn_unload);
- gru_unload_context(gts, 1);
- } else {
- if (gru_retarget_intr(gts))
- STAT(migrated_nopfn_retarget);
- }
- }
+ gru_check_context_placement(gts);
if (!gts->ts_gru) {
STAT(load_user_context);
- if (!gru_assign_gru_context(gts, blade_id)) {
+ if (!gru_assign_gru_context(gts)) {
preempt_enable();
mutex_unlock(&gts->ts_ctxlock);
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(GRU_ASSIGN_DELAY); /* true hack ZZZ */
- blade_id = uv_numa_blade_id();
if (gts->ts_steal_jiffies + GRU_STEAL_DELAY < jiffies)
- gru_steal_context(gts, blade_id);
+ gru_steal_context(gts);
goto again;
}
gru_load_context(gts);
diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
index 3f2375c5ba5..7768b87d995 100644
--- a/drivers/misc/sgi-gru/gruprocfs.c
+++ b/drivers/misc/sgi-gru/gruprocfs.c
@@ -36,8 +36,7 @@ static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
{
unsigned long val = atomic_long_read(v);
- if (val)
- seq_printf(s, "%16lu %s\n", val, id);
+ seq_printf(s, "%16lu %s\n", val, id);
}
static int statistics_show(struct seq_file *s, void *p)
@@ -46,7 +45,8 @@ static int statistics_show(struct seq_file *s, void *p)
printstat(s, vdata_free);
printstat(s, gts_alloc);
printstat(s, gts_free);
- printstat(s, vdata_double_alloc);
+ printstat(s, gms_alloc);
+ printstat(s, gms_free);
printstat(s, gts_double_allocate);
printstat(s, assign_context);
printstat(s, assign_context_failed);
@@ -59,28 +59,25 @@ static int statistics_show(struct seq_file *s, void *p)
printstat(s, steal_kernel_context);
printstat(s, steal_context_failed);
printstat(s, nopfn);
- printstat(s, break_cow);
printstat(s, asid_new);
printstat(s, asid_next);
printstat(s, asid_wrap);
printstat(s, asid_reuse);
printstat(s, intr);
+ printstat(s, intr_cbr);
+ printstat(s, intr_tfh);
+ printstat(s, intr_spurious);
printstat(s, intr_mm_lock_failed);
printstat(s, call_os);
- printstat(s, call_os_offnode_reference);
- printstat(s, call_os_check_for_bug);
printstat(s, call_os_wait_queue);
printstat(s, user_flush_tlb);
printstat(s, user_unload_context);
printstat(s, user_exception);
printstat(s, set_context_option);
- printstat(s, migrate_check);
- printstat(s, migrated_retarget);
- printstat(s, migrated_unload);
- printstat(s, migrated_unload_delay);
- printstat(s, migrated_nopfn_retarget);
- printstat(s, migrated_nopfn_unload);
+ printstat(s, check_context_retarget_intr);
+ printstat(s, check_context_unload);
printstat(s, tlb_dropin);
+ printstat(s, tlb_preload_page);
printstat(s, tlb_dropin_fail_no_asid);
printstat(s, tlb_dropin_fail_upm);
printstat(s, tlb_dropin_fail_invalid);
@@ -88,16 +85,15 @@ static int statistics_show(struct seq_file *s, void *p)
printstat(s, tlb_dropin_fail_idle);
printstat(s, tlb_dropin_fail_fmm);
printstat(s, tlb_dropin_fail_no_exception);
- printstat(s, tlb_dropin_fail_no_exception_war);
printstat(s, tfh_stale_on_fault);
printstat(s, mmu_invalidate_range);
printstat(s, mmu_invalidate_page);
- printstat(s, mmu_clear_flush_young);
printstat(s, flush_tlb);
printstat(s, flush_tlb_gru);
printstat(s, flush_tlb_gru_tgh);
printstat(s, flush_tlb_gru_zero_asid);
printstat(s, copy_gpa);
+ printstat(s, read_gpa);
printstat(s, mesq_receive);
printstat(s, mesq_receive_none);
printstat(s, mesq_send);
@@ -108,7 +104,6 @@ static int statistics_show(struct seq_file *s, void *p)
printstat(s, mesq_send_qlimit_reached);
printstat(s, mesq_send_amo_nacked);
printstat(s, mesq_send_put_nacked);
- printstat(s, mesq_qf_not_full);
printstat(s, mesq_qf_locked);
printstat(s, mesq_qf_noop_not_full);
printstat(s, mesq_qf_switch_head_failed);
@@ -118,6 +113,7 @@ static int statistics_show(struct seq_file *s, void *p)
printstat(s, mesq_noop_qlimit_reached);
printstat(s, mesq_noop_amo_nacked);
printstat(s, mesq_noop_put_nacked);
+ printstat(s, mesq_noop_page_overflow);
return 0;
}
@@ -133,8 +129,10 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
int op;
unsigned long total, count, max;
static char *id[] = {"cch_allocate", "cch_start", "cch_interrupt",
- "cch_interrupt_sync", "cch_deallocate", "tgh_invalidate"};
+ "cch_interrupt_sync", "cch_deallocate", "tfh_write_only",
+ "tfh_write_restart", "tgh_invalidate"};
+ seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
for (op = 0; op < mcsop_last; op++) {
count = atomic_long_read(&mcs_op_statistics[op].count);
total = atomic_long_read(&mcs_op_statistics[op].total);
@@ -154,6 +152,7 @@ static ssize_t mcs_statistics_write(struct file *file,
static int options_show(struct seq_file *s, void *p)
{
+ seq_printf(s, "#bitmask: 1=trace, 2=statistics\n");
seq_printf(s, "0x%lx\n", gru_options);
return 0;
}
@@ -183,16 +182,17 @@ static int cch_seq_show(struct seq_file *file, void *data)
const char *mode[] = { "??", "UPM", "INTR", "OS_POLL" };
if (gid == 0)
- seq_printf(file, "#%5s%5s%6s%9s%6s%8s%8s\n", "gid", "bid",
- "ctx#", "pid", "cbrs", "dsbytes", "mode");
+ seq_printf(file, "#%5s%5s%6s%7s%9s%6s%8s%8s\n", "gid", "bid",
+ "ctx#", "asid", "pid", "cbrs", "dsbytes", "mode");
if (gru)
for (i = 0; i < GRU_NUM_CCH; i++) {
ts = gru->gs_gts[i];
if (!ts)
continue;
- seq_printf(file, " %5d%5d%6d%9d%6d%8d%8s\n",
+ seq_printf(file, " %5d%5d%6d%7d%9d%6d%8d%8s\n",
gru->gs_gid, gru->gs_blade_id, i,
- ts->ts_tgid_owner,
+ is_kernel_context(ts) ? 0 : ts->ts_gms->ms_asids[gid].mt_asid,
+ is_kernel_context(ts) ? 0 : ts->ts_tgid_owner,
ts->ts_cbr_au_count * GRU_CBR_AU_SIZE,
ts->ts_cbr_au_count * GRU_DSR_AU_BYTES,
mode[ts->ts_user_options &
@@ -355,7 +355,7 @@ static void delete_proc_files(void)
for (p = proc_files; p->name; p++)
if (p->entry)
remove_proc_entry(p->name, proc_gru);
- remove_proc_entry("gru", NULL);
+ remove_proc_entry("gru", proc_gru->parent);
}
}
diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
index 46990bcfa53..02a77b8b8ee 100644
--- a/drivers/misc/sgi-gru/grutables.h
+++ b/drivers/misc/sgi-gru/grutables.h
@@ -161,7 +161,7 @@ extern unsigned int gru_max_gids;
#define GRU_MAX_GRUS (GRU_MAX_BLADES * GRU_CHIPLETS_PER_BLADE)
#define GRU_DRIVER_ID_STR "SGI GRU Device Driver"
-#define GRU_DRIVER_VERSION_STR "0.80"
+#define GRU_DRIVER_VERSION_STR "0.85"
/*
* GRU statistics.
@@ -171,7 +171,8 @@ struct gru_stats_s {
atomic_long_t vdata_free;
atomic_long_t gts_alloc;
atomic_long_t gts_free;
- atomic_long_t vdata_double_alloc;
+ atomic_long_t gms_alloc;
+ atomic_long_t gms_free;
atomic_long_t gts_double_allocate;
atomic_long_t assign_context;
atomic_long_t assign_context_failed;
@@ -184,28 +185,25 @@ struct gru_stats_s {
atomic_long_t steal_kernel_context;
atomic_long_t steal_context_failed;
atomic_long_t nopfn;
- atomic_long_t break_cow;
atomic_long_t asid_new;
atomic_long_t asid_next;
atomic_long_t asid_wrap;
atomic_long_t asid_reuse;
atomic_long_t intr;
+ atomic_long_t intr_cbr;
+ atomic_long_t intr_tfh;
+ atomic_long_t intr_spurious;
atomic_long_t intr_mm_lock_failed;
atomic_long_t call_os;
- atomic_long_t call_os_offnode_reference;
- atomic_long_t call_os_check_for_bug;
atomic_long_t call_os_wait_queue;
atomic_long_t user_flush_tlb;
atomic_long_t user_unload_context;
atomic_long_t user_exception;
atomic_long_t set_context_option;
- atomic_long_t migrate_check;
- atomic_long_t migrated_retarget;
- atomic_long_t migrated_unload;
- atomic_long_t migrated_unload_delay;
- atomic_long_t migrated_nopfn_retarget;
- atomic_long_t migrated_nopfn_unload;
+ atomic_long_t check_context_retarget_intr;
+ atomic_long_t check_context_unload;
atomic_long_t tlb_dropin;
+ atomic_long_t tlb_preload_page;
atomic_long_t tlb_dropin_fail_no_asid;
atomic_long_t tlb_dropin_fail_upm;
atomic_long_t tlb_dropin_fail_invalid;
@@ -213,17 +211,16 @@ struct gru_stats_s {
atomic_long_t tlb_dropin_fail_idle;
atomic_long_t tlb_dropin_fail_fmm;
atomic_long_t tlb_dropin_fail_no_exception;
- atomic_long_t tlb_dropin_fail_no_exception_war;
atomic_long_t tfh_stale_on_fault;
atomic_long_t mmu_invalidate_range;
atomic_long_t mmu_invalidate_page;
- atomic_long_t mmu_clear_flush_young;
atomic_long_t flush_tlb;
atomic_long_t flush_tlb_gru;
atomic_long_t flush_tlb_gru_tgh;
atomic_long_t flush_tlb_gru_zero_asid;
atomic_long_t copy_gpa;
+ atomic_long_t read_gpa;
atomic_long_t mesq_receive;
atomic_long_t mesq_receive_none;
@@ -235,7 +232,7 @@ struct gru_stats_s {
atomic_long_t mesq_send_qlimit_reached;
atomic_long_t mesq_send_amo_nacked;
atomic_long_t mesq_send_put_nacked;
- atomic_long_t mesq_qf_not_full;
+ atomic_long_t mesq_page_overflow;
atomic_long_t mesq_qf_locked;
atomic_long_t mesq_qf_noop_not_full;
atomic_long_t mesq_qf_switch_head_failed;
@@ -245,11 +242,13 @@ struct gru_stats_s {
atomic_long_t mesq_noop_qlimit_reached;
atomic_long_t mesq_noop_amo_nacked;
atomic_long_t mesq_noop_put_nacked;
+ atomic_long_t mesq_noop_page_overflow;
};
enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
- cchop_deallocate, tghop_invalidate, mcsop_last};
+ cchop_deallocate, tfhop_write_only, tfhop_write_restart,
+ tghop_invalidate, mcsop_last};
struct mcs_op_statistic {
atomic_long_t count;
@@ -259,8 +258,8 @@ struct mcs_op_statistic {
extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
-#define OPT_DPRINT 1
-#define OPT_STATS 2
+#define OPT_DPRINT 1
+#define OPT_STATS 2
#define IRQ_GRU 110 /* Starting IRQ number for interrupts */
@@ -283,7 +282,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
#define gru_dbg(dev, fmt, x...) \
do { \
if (gru_options & OPT_DPRINT) \
- dev_dbg(dev, "%s: " fmt, __func__, x); \
+ printk(KERN_DEBUG "GRU:%d %s: " fmt, smp_processor_id(), __func__, x);\
} while (0)
#else
#define gru_dbg(x...)
@@ -297,13 +296,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
#define ASID_INC 8 /* number of regions */
/* Generate a GRU asid value from a GRU base asid & a virtual address. */
-#if defined CONFIG_IA64
#define VADDR_HI_BIT 64
-#elif defined CONFIG_X86_64
-#define VADDR_HI_BIT 48
-#else
-#error "Unsupported architecture"
-#endif
#define GRUREGION(addr) ((addr) >> (VADDR_HI_BIT - 3) & 3)
#define GRUASID(asid, addr) ((asid) + GRUREGION(addr))
@@ -345,6 +338,7 @@ struct gru_vma_data {
long vd_user_options;/* misc user option flags */
int vd_cbr_au_count;
int vd_dsr_au_count;
+ unsigned char vd_tlb_preload_count;
};
/*
@@ -360,6 +354,7 @@ struct gru_thread_state {
struct gru_state *ts_gru; /* GRU where the context is
loaded */
struct gru_mm_struct *ts_gms; /* asid & ioproc struct */
+ unsigned char ts_tlb_preload_count; /* TLB preload pages */
unsigned long ts_cbr_map; /* map of allocated CBRs */
unsigned long ts_dsr_map; /* map of allocated DATA
resources */
@@ -368,6 +363,8 @@ struct gru_thread_state {
long ts_user_options;/* misc user option flags */
pid_t ts_tgid_owner; /* task that is using the
context - for migration */
+ short ts_user_blade_id;/* user selected blade */
+ char ts_user_chiplet_id;/* user selected chiplet */
unsigned short ts_sizeavail; /* Pagesizes in use */
int ts_tsid; /* thread that owns the
structure */
@@ -384,13 +381,11 @@ struct gru_thread_state {
char ts_blade; /* If >= 0, migrate context if
ref from diferent blade */
char ts_force_cch_reload;
- char ts_force_unload;/* force context to be unloaded
- after migration */
char ts_cbr_idx[GRU_CBR_AU];/* CBR numbers of each
allocated CB */
int ts_data_valid; /* Indicates if ts_gdata has
valid data */
- struct gts_statistics ustats; /* User statistics */
+ struct gru_gseg_statistics ustats; /* User statistics */
unsigned long ts_gdata[0]; /* save area for GRU data (CB,
DS, CBE) */
};
@@ -422,6 +417,7 @@ struct gru_state {
gru segments (64) */
unsigned short gs_gid; /* unique GRU number */
unsigned short gs_blade_id; /* blade of GRU */
+ unsigned char gs_chiplet_id; /* blade chiplet of GRU */
unsigned char gs_tgh_local_shift; /* used to pick TGH for
local flush */
unsigned char gs_tgh_first_remote; /* starting TGH# for
@@ -453,6 +449,7 @@ struct gru_state {
in use */
struct gru_thread_state *gs_gts[GRU_NUM_CCH]; /* GTS currently using
the context */
+ int gs_irq[GRU_NUM_TFM]; /* Interrupt irqs */
};
/*
@@ -619,6 +616,15 @@ static inline int is_kernel_context(struct gru_thread_state *gts)
return !gts->ts_mm;
}
+/*
+ * The following are for Nehelem-EX. A more general scheme is needed for
+ * future processors.
+ */
+#define UV_MAX_INT_CORES 8
+#define uv_cpu_socket_number(p) ((cpu_physical_id(p) >> 5) & 1)
+#define uv_cpu_ht_number(p) (cpu_physical_id(p) & 1)
+#define uv_cpu_core_number(p) (((cpu_physical_id(p) >> 2) & 4) | \
+ ((cpu_physical_id(p) >> 1) & 3))
/*-----------------------------------------------------------------------------
* Function prototypes & externs
*/
@@ -633,24 +639,26 @@ extern struct gru_thread_state *gru_find_thread_state(struct vm_area_struct
*vma, int tsid);
extern struct gru_thread_state *gru_alloc_thread_state(struct vm_area_struct
*vma, int tsid);
-extern struct gru_state *gru_assign_gru_context(struct gru_thread_state *gts,
- int blade);
+extern struct gru_state *gru_assign_gru_context(struct gru_thread_state *gts);
extern void gru_load_context(struct gru_thread_state *gts);
-extern void gru_steal_context(struct gru_thread_state *gts, int blade_id);
+extern void gru_steal_context(struct gru_thread_state *gts);
extern void gru_unload_context(struct gru_thread_state *gts, int savestate);
-extern int gru_update_cch(struct gru_thread_state *gts, int force_unload);
+extern int gru_update_cch(struct gru_thread_state *gts);
extern void gts_drop(struct gru_thread_state *gts);
extern void gru_tgh_flush_init(struct gru_state *gru);
extern int gru_kservices_init(void);
extern void gru_kservices_exit(void);
+extern irqreturn_t gru0_intr(int irq, void *dev_id);
+extern irqreturn_t gru1_intr(int irq, void *dev_id);
+extern irqreturn_t gru_intr_mblade(int irq, void *dev_id);
extern int gru_dump_chiplet_request(unsigned long arg);
extern long gru_get_gseg_statistics(unsigned long arg);
-extern irqreturn_t gru_intr(int irq, void *dev_id);
extern int gru_handle_user_call_os(unsigned long address);
extern int gru_user_flush_tlb(unsigned long arg);
extern int gru_user_unload_context(unsigned long arg);
extern int gru_get_exception_detail(unsigned long arg);
extern int gru_set_context_option(unsigned long address);
+extern void gru_check_context_placement(struct gru_thread_state *gts);
extern int gru_cpu_fault_map_id(void);
extern struct vm_area_struct *gru_find_vma(unsigned long vaddr);
extern void gru_flush_all_tlb(struct gru_state *gru);
@@ -658,7 +666,8 @@ extern int gru_proc_init(void);
extern void gru_proc_exit(void);
extern struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma,
- int cbr_au_count, int dsr_au_count, int options, int tsid);
+ int cbr_au_count, int dsr_au_count,
+ unsigned char tlb_preload_count, int options, int tsid);
extern unsigned long gru_reserve_cb_resources(struct gru_state *gru,
int cbr_au_count, char *cbmap);
extern unsigned long gru_reserve_ds_resources(struct gru_state *gru,
diff --git a/drivers/misc/sgi-gru/grutlbpurge.c b/drivers/misc/sgi-gru/grutlbpurge.c
index 1d125091f5e..240a6d36166 100644
--- a/drivers/misc/sgi-gru/grutlbpurge.c
+++ b/drivers/misc/sgi-gru/grutlbpurge.c
@@ -184,8 +184,8 @@ void gru_flush_tlb_range(struct gru_mm_struct *gms, unsigned long start,
STAT(flush_tlb_gru_tgh);
asid = GRUASID(asid, start);
gru_dbg(grudev,
- " FLUSH gruid %d, asid 0x%x, num %ld, cbmap 0x%x\n",
- gid, asid, num, asids->mt_ctxbitmap);
+ " FLUSH gruid %d, asid 0x%x, vaddr 0x%lx, vamask 0x%x, num %ld, cbmap 0x%x\n",
+ gid, asid, start, grupagesize, num, asids->mt_ctxbitmap);
tgh = get_lock_tgh_handle(gru);
tgh_invalidate(tgh, start, ~0, asid, grupagesize, 0,
num - 1, asids->mt_ctxbitmap);
@@ -299,6 +299,7 @@ struct gru_mm_struct *gru_register_mmu_notifier(void)
{
struct gru_mm_struct *gms;
struct mmu_notifier *mn;
+ int err;
mn = mmu_find_ops(current->mm, &gru_mmuops);
if (mn) {
@@ -307,16 +308,22 @@ struct gru_mm_struct *gru_register_mmu_notifier(void)
} else {
gms = kzalloc(sizeof(*gms), GFP_KERNEL);
if (gms) {
+ STAT(gms_alloc);
spin_lock_init(&gms->ms_asid_lock);
gms->ms_notifier.ops = &gru_mmuops;
atomic_set(&gms->ms_refcnt, 1);
init_waitqueue_head(&gms->ms_wait_queue);
- __mmu_notifier_register(&gms->ms_notifier, current->mm);
+ err = __mmu_notifier_register(&gms->ms_notifier, current->mm);
+ if (err)
+ goto error;
}
}
gru_dbg(grudev, "gms %p, refcnt %d\n", gms,
atomic_read(&gms->ms_refcnt));
return gms;
+error:
+ kfree(gms);
+ return ERR_PTR(err);
}
void gru_drop_mmu_notifier(struct gru_mm_struct *gms)
@@ -327,6 +334,7 @@ void gru_drop_mmu_notifier(struct gru_mm_struct *gms)
if (!gms->ms_released)
mmu_notifier_unregister(&gms->ms_notifier, current->mm);
kfree(gms);
+ STAT(gms_free);
}
}
diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
index 2275126cb33..851b2f25ce0 100644
--- a/drivers/misc/sgi-xp/xp.h
+++ b/drivers/misc/sgi-xp/xp.h
@@ -339,6 +339,7 @@ extern short xp_partition_id;
extern u8 xp_region_size;
extern unsigned long (*xp_pa) (void *);
+extern unsigned long (*xp_socket_pa) (unsigned long);
extern enum xp_retval (*xp_remote_memcpy) (unsigned long, const unsigned long,
size_t);
extern int (*xp_cpu_to_nasid) (int);
diff --git a/drivers/misc/sgi-xp/xp_main.c b/drivers/misc/sgi-xp/xp_main.c
index 7896849b16d..01be66d02ca 100644
--- a/drivers/misc/sgi-xp/xp_main.c
+++ b/drivers/misc/sgi-xp/xp_main.c
@@ -44,6 +44,9 @@ EXPORT_SYMBOL_GPL(xp_region_size);
unsigned long (*xp_pa) (void *addr);
EXPORT_SYMBOL_GPL(xp_pa);
+unsigned long (*xp_socket_pa) (unsigned long gpa);
+EXPORT_SYMBOL_GPL(xp_socket_pa);
+
enum xp_retval (*xp_remote_memcpy) (unsigned long dst_gpa,
const unsigned long src_gpa, size_t len);
EXPORT_SYMBOL_GPL(xp_remote_memcpy);
diff --git a/drivers/misc/sgi-xp/xp_sn2.c b/drivers/misc/sgi-xp/xp_sn2.c
index fb3ec9d735a..d8e463f8724 100644
--- a/drivers/misc/sgi-xp/xp_sn2.c
+++ b/drivers/misc/sgi-xp/xp_sn2.c
@@ -83,6 +83,15 @@ xp_pa_sn2(void *addr)
return __pa(addr);
}
+/*
+ * Convert a global physical to a socket physical address.
+ */
+static unsigned long
+xp_socket_pa_sn2(unsigned long gpa)
+{
+ return gpa;
+}
+
/*
* Wrapper for bte_copy().
*
@@ -162,6 +171,7 @@ xp_init_sn2(void)
xp_region_size = sn_region_size;
xp_pa = xp_pa_sn2;
+ xp_socket_pa = xp_socket_pa_sn2;
xp_remote_memcpy = xp_remote_memcpy_sn2;
xp_cpu_to_nasid = xp_cpu_to_nasid_sn2;
xp_expand_memprotect = xp_expand_memprotect_sn2;
diff --git a/drivers/misc/sgi-xp/xp_uv.c b/drivers/misc/sgi-xp/xp_uv.c
index d238576b26f..a0d093274dc 100644
--- a/drivers/misc/sgi-xp/xp_uv.c
+++ b/drivers/misc/sgi-xp/xp_uv.c
@@ -32,12 +32,44 @@ xp_pa_uv(void *addr)
return uv_gpa(addr);
}
+/*
+ * Convert a global physical to socket physical address.
+ */
+static unsigned long
+xp_socket_pa_uv(unsigned long gpa)
+{
+ return uv_gpa_to_soc_phys_ram(gpa);
+}
+
+static enum xp_retval
+xp_remote_mmr_read(unsigned long dst_gpa, const unsigned long src_gpa,
+ size_t len)
+{
+ int ret;
+ unsigned long *dst_va = __va(uv_gpa_to_soc_phys_ram(dst_gpa));
+
+ BUG_ON(!uv_gpa_in_mmr_space(src_gpa));
+ BUG_ON(len != 8);
+
+ ret = gru_read_gpa(dst_va, src_gpa);
+ if (ret == 0)
+ return xpSuccess;
+
+ dev_err(xp, "gru_read_gpa() failed, dst_gpa=0x%016lx src_gpa=0x%016lx "
+ "len=%ld\n", dst_gpa, src_gpa, len);
+ return xpGruCopyError;
+}
+
+
static enum xp_retval
xp_remote_memcpy_uv(unsigned long dst_gpa, const unsigned long src_gpa,
size_t len)
{
int ret;
+ if (uv_gpa_in_mmr_space(src_gpa))
+ return xp_remote_mmr_read(dst_gpa, src_gpa, len);
+
ret = gru_copy_gpa(dst_gpa, src_gpa, len);
if (ret == 0)
return xpSuccess;
@@ -123,6 +155,7 @@ xp_init_uv(void)
xp_region_size = sn_region_size;
xp_pa = xp_pa_uv;
+ xp_socket_pa = xp_socket_pa_uv;
xp_remote_memcpy = xp_remote_memcpy_uv;
xp_cpu_to_nasid = xp_cpu_to_nasid_uv;
xp_expand_memprotect = xp_expand_memprotect_uv;
diff --git a/drivers/misc/sgi-xp/xpc_partition.c b/drivers/misc/sgi-xp/xpc_partition.c
index 65877bc5eda..9a6268c89fd 100644
--- a/drivers/misc/sgi-xp/xpc_partition.c
+++ b/drivers/misc/sgi-xp/xpc_partition.c
@@ -18,6 +18,7 @@
#include
#include
#include "xpc.h"
+#include <asm/uv/uv_hub.h>
/* XPC is exiting flag */
int xpc_exiting;
@@ -92,8 +93,12 @@ xpc_get_rsvd_page_pa(int nasid)
break;
/* !!! L1_CACHE_ALIGN() is only a sn2-bte_copy requirement */
- if (L1_CACHE_ALIGN(len) > buf_len) {
- kfree(buf_base);
+ if (is_shub())
+ len = L1_CACHE_ALIGN(len);
+
+ if (len > buf_len) {
+ if (buf_base != NULL)
+ kfree(buf_base);
buf_len = L1_CACHE_ALIGN(len);
buf = xpc_kmalloc_cacheline_aligned(buf_len, GFP_KERNEL,
&buf_base);
@@ -105,7 +110,7 @@ xpc_get_rsvd_page_pa(int nasid)
}
}
- ret = xp_remote_memcpy(xp_pa(buf), rp_pa, buf_len);
+ ret = xp_remote_memcpy(xp_pa(buf), rp_pa, len);
if (ret != xpSuccess) {
dev_dbg(xpc_part, "xp_remote_memcpy failed %d\n", ret);
break;
@@ -143,7 +148,7 @@ xpc_setup_rsvd_page(void)
dev_err(xpc_part, "SAL failed to locate the reserved page\n");
return -ESRCH;
}
- rp = (struct xpc_rsvd_page *)__va(rp_pa);
+ rp = (struct xpc_rsvd_page *)__va(xp_socket_pa(rp_pa));
if (rp->SAL_version < 3) {
/* SAL_versions < 3 had a SAL_partid defined as a u8 */
diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c
index b5bbe59f9c5..8725d5e8ab0 100644
--- a/drivers/misc/sgi-xp/xpc_uv.c
+++ b/drivers/misc/sgi-xp/xpc_uv.c
@@ -157,22 +157,24 @@ xpc_gru_mq_watchlist_alloc_uv(struct xpc_gru_mq_uv *mq)
{
int ret;
-#if defined CONFIG_X86_64
- ret = uv_bios_mq_watchlist_alloc(mq->mmr_blade, uv_gpa(mq->address),
- mq->order, &mq->mmr_offset);
- if (ret < 0) {
- dev_err(xpc_part, "uv_bios_mq_watchlist_alloc() failed, "
- "ret=%d\n", ret);
- return ret;
- }
-#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
- ret = sn_mq_watchlist_alloc(mq->mmr_blade, (void *)uv_gpa(mq->address),
+#if defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
+ int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
+
+ ret = sn_mq_watchlist_alloc(mmr_pnode, (void *)uv_gpa(mq->address),
mq->order, &mq->mmr_offset);
if (ret < 0) {
dev_err(xpc_part, "sn_mq_watchlist_alloc() failed, ret=%d\n",
ret);
return -EBUSY;
}
+#elif defined CONFIG_X86_64
+ ret = uv_bios_mq_watchlist_alloc(uv_gpa(mq->address),
+ mq->order, &mq->mmr_offset);
+ if (ret < 0) {
+ dev_err(xpc_part, "uv_bios_mq_watchlist_alloc() failed, "
+ "ret=%d\n", ret);
+ return ret;
+ }
#else
#error not a supported configuration
#endif
@@ -185,12 +187,13 @@ static void
xpc_gru_mq_watchlist_free_uv(struct xpc_gru_mq_uv *mq)
{
int ret;
+ int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
#if defined CONFIG_X86_64
- ret = uv_bios_mq_watchlist_free(mq->mmr_blade, mq->watchlist_num);
+ ret = uv_bios_mq_watchlist_free(mmr_pnode, mq->watchlist_num);
BUG_ON(ret != BIOS_STATUS_SUCCESS);
#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
- ret = sn_mq_watchlist_free(mq->mmr_blade, mq->watchlist_num);
+ ret = sn_mq_watchlist_free(mmr_pnode, mq->watchlist_num);
BUG_ON(ret != SALRET_OK);
#else
#error not a supported configuration
@@ -204,6 +207,7 @@ xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name,
enum xp_retval xp_ret;
int ret;
int nid;
+ int nasid;
int pg_order;
struct page *page;
struct xpc_gru_mq_uv *mq;
@@ -259,9 +263,11 @@ xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name,
goto out_5;
}
+ nasid = UV_PNODE_TO_NASID(uv_cpu_to_pnode(cpu));
+
mmr_value = (struct uv_IO_APIC_route_entry *)&mq->mmr_value;
ret = gru_create_message_queue(mq->gru_mq_desc, mq->address, mq_size,
- nid, mmr_value->vector, mmr_value->dest);
+ nasid, mmr_value->vector, mmr_value->dest);
if (ret != 0) {
dev_err(xpc_part, "gru_create_message_queue() returned "
"error=%d\n", ret);
@@ -946,11 +952,13 @@ xpc_get_fifo_entry_uv(struct xpc_fifo_head_uv *head)
head->first = first->next;
if (head->first == NULL)
head->last = NULL;
+
+ head->n_entries--;
+ BUG_ON(head->n_entries < 0);
+
+ first->next = NULL;
}
- head->n_entries--;
- BUG_ON(head->n_entries < 0);
spin_unlock_irqrestore(&head->lock, irq_flags);
- first->next = NULL;
return first;
}
@@ -1019,7 +1027,8 @@ xpc_make_first_contact_uv(struct xpc_partition *part)
xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV);
- while (part->sn.uv.remote_act_state != XPC_P_AS_ACTIVATING) {
+ while (!((part->sn.uv.remote_act_state == XPC_P_AS_ACTIVATING) ||
+ (part->sn.uv.remote_act_state == XPC_P_AS_ACTIVE))) {
dev_dbg(xpc_part, "waiting to make first contact with "
"partition %d\n", XPC_PARTID(part));
@@ -1422,7 +1431,6 @@ xpc_handle_notify_mq_msg_uv(struct xpc_partition *part,
msg_slot = ch_uv->recv_msg_slots +
(msg->hdr.msg_slot_number % ch->remote_nentries) * ch->entry_size;
- BUG_ON(msg->hdr.msg_slot_number != msg_slot->hdr.msg_slot_number);
BUG_ON(msg_slot->hdr.size != 0);
memcpy(msg_slot, msg, msg->hdr.size);
@@ -1646,8 +1654,6 @@ xpc_received_payload_uv(struct xpc_channel *ch, void *payload)
sizeof(struct xpc_notify_mq_msghdr_uv));
if (ret != xpSuccess)
XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
-
- msg->hdr.msg_slot_number += ch->remote_nentries;
}
static struct xpc_arch_operations xpc_arch_ops_uv = {
diff --git a/drivers/net/mlx4/alloc.c b/drivers/net/mlx4/alloc.c
index ad95d5f7b63..8c8515619b8 100644
--- a/drivers/net/mlx4/alloc.c
+++ b/drivers/net/mlx4/alloc.c
@@ -72,35 +72,6 @@ void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj)
mlx4_bitmap_free_range(bitmap, obj, 1);
}
-static unsigned long find_aligned_range(unsigned long *bitmap,
- u32 start, u32 nbits,
- int len, int align)
-{
- unsigned long end, i;
-
-again:
- start = ALIGN(start, align);
-
- while ((start < nbits) && test_bit(start, bitmap))
- start += align;
-
- if (start >= nbits)
- return -1;
-
- end = start+len;
- if (end > nbits)
- return -1;
-
- for (i = start + 1; i < end; i++) {
- if (test_bit(i, bitmap)) {
- start = i + 1;
- goto again;
- }
- }
-
- return start;
-}
-
u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align)
{
u32 obj, i;
@@ -110,13 +81,13 @@ u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align)
spin_lock(&bitmap->lock);
- obj = find_aligned_range(bitmap->table, bitmap->last,
- bitmap->max, cnt, align);
+ obj = bitmap_find_next_zero_area(bitmap->table, bitmap->max,
+ bitmap->last, cnt, align - 1);
if (obj >= bitmap->max) {
bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
& bitmap->mask;
- obj = find_aligned_range(bitmap->table, 0, bitmap->max,
- cnt, align);
+ obj = bitmap_find_next_zero_area(bitmap->table, bitmap->max,
+ 0, cnt, align - 1);
}
if (obj < bitmap->max) {
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
index 2597145a066..ad113b0f62d 100644
--- a/drivers/parport/parport_pc.c
+++ b/drivers/parport/parport_pc.c
@@ -3403,7 +3403,7 @@ static int __init parport_parse_param(const char *s, int *val,
*val = automatic;
else if (!strncmp(s, "none", 4))
*val = none;
- else if (nofifo && !strncmp(s, "nofifo", 4))
+ else if (nofifo && !strncmp(s, "nofifo", 6))
*val = nofifo;
else {
char *ep;
diff --git a/drivers/pnp/pnpbios/proc.c b/drivers/pnp/pnpbios/proc.c
index b35d921bac6..2d8ac43f78e 100644
--- a/drivers/pnp/pnpbios/proc.c
+++ b/drivers/pnp/pnpbios/proc.c
@@ -24,6 +24,7 @@
#include
#include
#include
+#include <linux/seq_file.h>
#include
#include
@@ -33,42 +34,65 @@
static struct proc_dir_entry *proc_pnp = NULL;
static struct proc_dir_entry *proc_pnp_boot = NULL;
-static int proc_read_pnpconfig(char *buf, char **start, off_t pos,
- int count, int *eof, void *data)
+static int pnpconfig_proc_show(struct seq_file *m, void *v)
{
struct pnp_isa_config_struc pnps;
if (pnp_bios_isapnp_config(&pnps))
return -EIO;
- return snprintf(buf, count,
- "structure_revision %d\n"
- "number_of_CSNs %d\n"
- "ISA_read_data_port 0x%x\n",
- pnps.revision, pnps.no_csns, pnps.isa_rd_data_port);
+ seq_printf(m, "structure_revision %d\n"
+ "number_of_CSNs %d\n"
+ "ISA_read_data_port 0x%x\n",
+ pnps.revision, pnps.no_csns, pnps.isa_rd_data_port);
+ return 0;
}
-static int proc_read_escdinfo(char *buf, char **start, off_t pos,
- int count, int *eof, void *data)
+static int pnpconfig_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, pnpconfig_proc_show, NULL);
+}
+
+static const struct file_operations pnpconfig_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = pnpconfig_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int escd_info_proc_show(struct seq_file *m, void *v)
{
struct escd_info_struc escd;
if (pnp_bios_escd_info(&escd))
return -EIO;
- return snprintf(buf, count,
- "min_ESCD_write_size %d\n"
+ seq_printf(m, "min_ESCD_write_size %d\n"
"ESCD_size %d\n"
"NVRAM_base 0x%x\n",
escd.min_escd_write_size,
escd.escd_size, escd.nv_storage_base);
+ return 0;
}
+static int escd_info_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, escd_info_proc_show, NULL);
+}
+
+static const struct file_operations escd_info_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = escd_info_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
#define MAX_SANE_ESCD_SIZE (32*1024)
-static int proc_read_escd(char *buf, char **start, off_t pos,
- int count, int *eof, void *data)
+static int escd_proc_show(struct seq_file *m, void *v)
{
struct escd_info_struc escd;
char *tmpbuf;
- int escd_size, escd_left_to_read, n;
+ int escd_size;
if (pnp_bios_escd_info(&escd))
return -EIO;
@@ -76,7 +100,7 @@ static int proc_read_escd(char *buf, char **start, off_t pos,
/* sanity check */
if (escd.escd_size > MAX_SANE_ESCD_SIZE) {
printk(KERN_ERR
- "PnPBIOS: proc_read_escd: ESCD size reported by BIOS escd_info call is too great\n");
+ "PnPBIOS: %s: ESCD size reported by BIOS escd_info call is too great\n", __func__);
return -EFBIG;
}
@@ -94,56 +118,75 @@ static int proc_read_escd(char *buf, char **start, off_t pos,
/* sanity check */
if (escd_size > MAX_SANE_ESCD_SIZE) {
- printk(KERN_ERR "PnPBIOS: proc_read_escd: ESCD size reported by"
- " BIOS read_escd call is too great\n");
+ printk(KERN_ERR "PnPBIOS: %s: ESCD size reported by"
+ " BIOS read_escd call is too great\n", __func__);
kfree(tmpbuf);
return -EFBIG;
}
- escd_left_to_read = escd_size - pos;
- if (escd_left_to_read < 0)
- escd_left_to_read = 0;
- if (escd_left_to_read == 0)
- *eof = 1;
- n = min(count, escd_left_to_read);
- memcpy(buf, tmpbuf + pos, n);
+ seq_write(m, tmpbuf, escd_size);
kfree(tmpbuf);
- *start = buf;
- return n;
+ return 0;
}
-static int proc_read_legacyres(char *buf, char **start, off_t pos,
- int count, int *eof, void *data)
+static int escd_proc_open(struct inode *inode, struct file *file)
{
- /* Assume that the following won't overflow the buffer */
- if (pnp_bios_get_stat_res(buf))
- return -EIO;
-
- return count; // FIXME: Return actual length
+ return single_open(file, escd_proc_show, NULL);
}
-static int proc_read_devices(char *buf, char **start, off_t pos,
- int count, int *eof, void *data)
+static const struct file_operations escd_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = escd_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int pnp_legacyres_proc_show(struct seq_file *m, void *v)
+{
+ void *buf;
+
+ buf = kmalloc(65536, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ if (pnp_bios_get_stat_res(buf)) {
+ kfree(buf);
+ return -EIO;
+ }
+
+ seq_write(m, buf, 65536);
+ kfree(buf);
+ return 0;
+}
+
+static int pnp_legacyres_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, pnp_legacyres_proc_show, NULL);
+}
+
+static const struct file_operations pnp_legacyres_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = pnp_legacyres_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int pnp_devices_proc_show(struct seq_file *m, void *v)
{
struct pnp_bios_node *node;
u8 nodenum;
- char *p = buf;
-
- if (pos >= 0xff)
- return 0;
node = kzalloc(node_info.max_node_size, GFP_KERNEL);
if (!node)
return -ENOMEM;
- for (nodenum = pos; nodenum < 0xff;) {
+ for (nodenum = 0; nodenum < 0xff;) {
u8 thisnodenum = nodenum;
- /* 26 = the number of characters per line sprintf'ed */
- if ((p - buf + 26) > count)
- break;
+
if (pnp_bios_get_dev_node(&nodenum, PNPMODE_DYNAMIC, node))
break;
- p += sprintf(p, "%02x\t%08x\t%02x:%02x:%02x\t%04x\n",
+ seq_printf(m, "%02x\t%08x\t%02x:%02x:%02x\t%04x\n",
node->handle, node->eisa_id,
node->type_code[0], node->type_code[1],
node->type_code[2], node->flags);
@@ -153,20 +196,29 @@ static int proc_read_devices(char *buf, char **start, off_t pos,
"PnPBIOS: proc_read_devices:",
(unsigned int)nodenum,
(unsigned int)thisnodenum);
- *eof = 1;
break;
}
}
kfree(node);
- if (nodenum == 0xff)
- *eof = 1;
- *start = (char *)((off_t) nodenum - pos);
- return p - buf;
+ return 0;
}
-static int proc_read_node(char *buf, char **start, off_t pos,
- int count, int *eof, void *data)
+static int pnp_devices_proc_open(struct inode *inode, struct file *file)
{
+ return single_open(file, pnp_devices_proc_show, NULL);
+}
+
+static const struct file_operations pnp_devices_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = pnp_devices_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int pnpbios_proc_show(struct seq_file *m, void *v)
+{
+ void *data = m->private;
struct pnp_bios_node *node;
int boot = (long)data >> 8;
u8 nodenum = (long)data;
@@ -180,14 +232,20 @@ static int proc_read_node(char *buf, char **start, off_t pos,
return -EIO;
}
len = node->size - sizeof(struct pnp_bios_node);
- memcpy(buf, node->data, len);
+ seq_write(m, node->data, len);
kfree(node);
- return len;
+ return 0;
}
-static int proc_write_node(struct file *file, const char __user * buf,
- unsigned long count, void *data)
+static int pnpbios_proc_open(struct inode *inode, struct file *file)
{
+ return single_open(file, pnpbios_proc_show, PDE(inode)->data);
+}
+
+static ssize_t pnpbios_proc_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *pos)
+{
+ void *data = PDE(file->f_path.dentry->d_inode)->data;
struct pnp_bios_node *node;
int boot = (long)data >> 8;
u8 nodenum = (long)data;
@@ -218,34 +276,33 @@ out:
return ret;
}
+static const struct file_operations pnpbios_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = pnpbios_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = pnpbios_proc_write,
+};
+
int pnpbios_interface_attach_device(struct pnp_bios_node *node)
{
char name[3];
- struct proc_dir_entry *ent;
sprintf(name, "%02x", node->handle);
if (!proc_pnp)
return -EIO;
if (!pnpbios_dont_use_current_config) {
- ent = create_proc_entry(name, 0, proc_pnp);
- if (ent) {
- ent->read_proc = proc_read_node;
- ent->write_proc = proc_write_node;
- ent->data = (void *)(long)(node->handle);
- }
+ proc_create_data(name, 0644, proc_pnp, &pnpbios_proc_fops,
+ (void *)(long)(node->handle));
}
if (!proc_pnp_boot)
return -EIO;
- ent = create_proc_entry(name, 0, proc_pnp_boot);
- if (ent) {
- ent->read_proc = proc_read_node;
- ent->write_proc = proc_write_node;
- ent->data = (void *)(long)(node->handle + 0x100);
+ if (proc_create_data(name, 0644, proc_pnp_boot, &pnpbios_proc_fops,
+ (void *)(long)(node->handle + 0x100)))
return 0;
- }
-
return -EIO;
}
@@ -262,14 +319,11 @@ int __init pnpbios_proc_init(void)
proc_pnp_boot = proc_mkdir("boot", proc_pnp);
if (!proc_pnp_boot)
return -EIO;
- create_proc_read_entry("devices", 0, proc_pnp, proc_read_devices, NULL);
- create_proc_read_entry("configuration_info", 0, proc_pnp,
- proc_read_pnpconfig, NULL);
- create_proc_read_entry("escd_info", 0, proc_pnp, proc_read_escdinfo,
- NULL);
- create_proc_read_entry("escd", S_IRUSR, proc_pnp, proc_read_escd, NULL);
- create_proc_read_entry("legacy_device_resources", 0, proc_pnp,
- proc_read_legacyres, NULL);
+ proc_create("devices", 0, proc_pnp, &pnp_devices_proc_fops);
+ proc_create("configuration_info", 0, proc_pnp, &pnpconfig_proc_fops);
+ proc_create("escd_info", 0, proc_pnp, &escd_info_proc_fops);
+ proc_create("escd", S_IRUSR, proc_pnp, &escd_proc_fops);
+ proc_create("legacy_device_resources", 0, proc_pnp, &pnp_legacyres_proc_fops);
return 0;
}
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 71fbd6e8edf..8167e9e6827 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -242,6 +242,15 @@ config RTC_DRV_M41T80_WDT
If you say Y here you will get support for the
watchdog timer in the ST M41T60 and M41T80 RTC chips series.
+config RTC_DRV_BQ32K
+ tristate "TI BQ32000"
+ help
+ If you say Y here you will get support for the TI
+ BQ32000 I2C RTC chip.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-bq32k.
+
config RTC_DRV_DM355EVM
tristate "TI DaVinci DM355 EVM RTC"
depends on MFD_DM355EVM_MSP
@@ -592,15 +601,22 @@ config RTC_DRV_AB3100
Select this to enable the ST-Ericsson AB3100 Mixed Signal IC RTC
support. This chip contains a battery- and capacitor-backed RTC.
+config RTC_DRV_NUC900
+ tristate "NUC910/NUC920 RTC driver"
+ depends on RTC_CLASS && ARCH_W90X900
+ help
+ If you say yes here you get support for the RTC subsystem of the
+ NUC910/NUC920 used in embedded systems.
comment "on-CPU RTC drivers"
config RTC_DRV_OMAP
tristate "TI OMAP1"
- depends on ARCH_OMAP15XX || ARCH_OMAP16XX || ARCH_OMAP730
+ depends on ARCH_OMAP15XX || ARCH_OMAP16XX || ARCH_OMAP730 || ARCH_DAVINCI_DA8XX
help
- Say "yes" here to support the real time clock on TI OMAP1 chips.
- This driver can also be built as a module called rtc-omap.
+ Say "yes" here to support the real time clock on TI OMAP1 and
+ DA8xx/OMAP-L13x chips. This driver can also be built as a
+ module called rtc-omap.
config RTC_DRV_S3C
tristate "Samsung S3C series SoC RTC"
@@ -846,4 +862,10 @@ config RTC_DRV_PCAP
If you say Y here you will get support for the RTC found on
the PCAP2 ASIC used on some Motorola phones.
+config RTC_DRV_MC13783
+ depends on MFD_MC13783
+ tristate "Freescale MC13783 RTC"
+ help
+ This enables support for the Freescale MC13783 PMIC RTC
+
endif # RTC_CLASS
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 7da6efb3e95..e5160fddc44 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_RTC_DRV_AT91RM9200)+= rtc-at91rm9200.o
obj-$(CONFIG_RTC_DRV_AT91SAM9) += rtc-at91sam9.o
obj-$(CONFIG_RTC_DRV_AU1XXX) += rtc-au1xxx.o
obj-$(CONFIG_RTC_DRV_BFIN) += rtc-bfin.o
+obj-$(CONFIG_RTC_DRV_BQ32K) += rtc-bq32k.o
obj-$(CONFIG_RTC_DRV_BQ4802) += rtc-bq4802.o
obj-$(CONFIG_RTC_DRV_CMOS) += rtc-cmos.o
obj-$(CONFIG_RTC_DRV_COH901331) += rtc-coh901331.o
@@ -52,8 +53,10 @@ obj-$(CONFIG_RTC_DRV_M48T86) += rtc-m48t86.o
obj-$(CONFIG_RTC_MXC) += rtc-mxc.o
obj-$(CONFIG_RTC_DRV_MAX6900) += rtc-max6900.o
obj-$(CONFIG_RTC_DRV_MAX6902) += rtc-max6902.o
+obj-$(CONFIG_RTC_DRV_MC13783) += rtc-mc13783.o
obj-$(CONFIG_RTC_DRV_MSM6242) += rtc-msm6242.o
obj-$(CONFIG_RTC_DRV_MV) += rtc-mv.o
+obj-$(CONFIG_RTC_DRV_NUC900) += rtc-nuc900.o
obj-$(CONFIG_RTC_DRV_OMAP) += rtc-omap.o
obj-$(CONFIG_RTC_DRV_PCAP) += rtc-pcap.o
obj-$(CONFIG_RTC_DRV_PCF8563) += rtc-pcf8563.o
diff --git a/drivers/rtc/rtc-at32ap700x.c b/drivers/rtc/rtc-at32ap700x.c
index e1ec33e40e3..8825695777d 100644
--- a/drivers/rtc/rtc-at32ap700x.c
+++ b/drivers/rtc/rtc-at32ap700x.c
@@ -256,6 +256,8 @@ static int __init at32_rtc_probe(struct platform_device *pdev)
goto out_iounmap;
}
+ platform_set_drvdata(pdev, rtc);
+
rtc->rtc = rtc_device_register(pdev->name, &pdev->dev,
&at32_rtc_ops, THIS_MODULE);
if (IS_ERR(rtc->rtc)) {
@@ -264,7 +266,6 @@ static int __init at32_rtc_probe(struct platform_device *pdev)
goto out_free_irq;
}
- platform_set_drvdata(pdev, rtc);
device_init_wakeup(&pdev->dev, 1);
dev_info(&pdev->dev, "Atmel RTC for AT32AP700x at %08lx irq %ld\n",
@@ -273,6 +274,7 @@ static int __init at32_rtc_probe(struct platform_device *pdev)
return 0;
out_free_irq:
+ platform_set_drvdata(pdev, NULL);
free_irq(irq, rtc);
out_iounmap:
iounmap(rtc->regs);
diff --git a/drivers/rtc/rtc-bq32k.c b/drivers/rtc/rtc-bq32k.c
new file mode 100644
index 00000000000..408cc8f735b
--- /dev/null
+++ b/drivers/rtc/rtc-bq32k.c
@@ -0,0 +1,204 @@
+/*
+ * Driver for TI BQ32000 RTC.
+ *
+ * Copyright (C) 2009 Semihalf.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/i2c.h>
+#include <linux/rtc.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/bcd.h>
+
+#define BQ32K_SECONDS 0x00 /* Seconds register address */
+#define BQ32K_SECONDS_MASK 0x7F /* Mask over seconds value */
+#define BQ32K_STOP 0x80 /* Oscillator Stop flag */
+
+#define BQ32K_MINUTES 0x01 /* Minutes register address */
+#define BQ32K_MINUTES_MASK 0x7F /* Mask over minutes value */
+#define BQ32K_OF 0x80 /* Oscillator Failure flag */
+
+#define BQ32K_HOURS_MASK 0x3F /* Mask over hours value */
+#define BQ32K_CENT 0x40 /* Century flag */
+#define BQ32K_CENT_EN 0x80 /* Century flag enable bit */
+
+struct bq32k_regs {
+ uint8_t seconds;
+ uint8_t minutes;
+ uint8_t cent_hours;
+ uint8_t day;
+ uint8_t date;
+ uint8_t month;
+ uint8_t years;
+};
+
+static struct i2c_driver bq32k_driver;
+
+static int bq32k_read(struct device *dev, void *data, uint8_t off, uint8_t len)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct i2c_msg msgs[] = {
+ {
+ .addr = client->addr,
+ .flags = 0,
+ .len = 1,
+ .buf = &off,
+ }, {
+ .addr = client->addr,
+ .flags = I2C_M_RD,
+ .len = len,
+ .buf = data,
+ }
+ };
+
+ if (i2c_transfer(client->adapter, msgs, 2) == 2)
+ return 0;
+
+ return -EIO;
+}
+
+static int bq32k_write(struct device *dev, void *data, uint8_t off, uint8_t len)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ uint8_t buffer[len + 1];
+
+ buffer[0] = off;
+ memcpy(&buffer[1], data, len);
+
+ if (i2c_master_send(client, buffer, len + 1) == len + 1)
+ return 0;
+
+ return -EIO;
+}
+
+static int bq32k_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct bq32k_regs regs;
+ int error;
+
+ error = bq32k_read(dev, &regs, 0, sizeof(regs));
+ if (error)
+ return error;
+
+ tm->tm_sec = bcd2bin(regs.seconds & BQ32K_SECONDS_MASK);
+ tm->tm_min = bcd2bin(regs.minutes & BQ32K_SECONDS_MASK);
+ tm->tm_hour = bcd2bin(regs.cent_hours & BQ32K_HOURS_MASK);
+ tm->tm_mday = bcd2bin(regs.date);
+ tm->tm_wday = bcd2bin(regs.day) - 1;
+ tm->tm_mon = bcd2bin(regs.month) - 1;
+ tm->tm_year = bcd2bin(regs.years) +
+ ((regs.cent_hours & BQ32K_CENT) ? 100 : 0);
+
+ return rtc_valid_tm(tm);
+}
+
+static int bq32k_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct bq32k_regs regs;
+
+ regs.seconds = bin2bcd(tm->tm_sec);
+ regs.minutes = bin2bcd(tm->tm_min);
+ regs.cent_hours = bin2bcd(tm->tm_hour) | BQ32K_CENT_EN;
+ regs.day = bin2bcd(tm->tm_wday + 1);
+ regs.date = bin2bcd(tm->tm_mday);
+ regs.month = bin2bcd(tm->tm_mon + 1);
+
+ if (tm->tm_year >= 100) {
+ regs.cent_hours |= BQ32K_CENT;
+ regs.years = bin2bcd(tm->tm_year - 100);
+ } else
+ regs.years = bin2bcd(tm->tm_year);
+
+ return bq32k_write(dev, &regs, 0, sizeof(regs));
+}
+
+static const struct rtc_class_ops bq32k_rtc_ops = {
+ .read_time = bq32k_rtc_read_time,
+ .set_time = bq32k_rtc_set_time,
+};
+
+static int bq32k_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct rtc_device *rtc;
+ uint8_t reg;
+ int error;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
+ return -ENODEV;
+
+ /* Check Oscillator Stop flag */
+ error = bq32k_read(dev, &reg, BQ32K_SECONDS, 1);
+ if (!error && (reg & BQ32K_STOP)) {
+ dev_warn(dev, "Oscillator was halted. Restarting...\n");
+ reg &= ~BQ32K_STOP;
+ error = bq32k_write(dev, &reg, BQ32K_SECONDS, 1);
+ }
+ if (error)
+ return error;
+
+ /* Check Oscillator Failure flag */
+ error = bq32k_read(dev, &reg, BQ32K_MINUTES, 1);
+ if (!error && (reg & BQ32K_OF)) {
+ dev_warn(dev, "Oscillator Failure. Check RTC battery.\n");
+ reg &= ~BQ32K_OF;
+ error = bq32k_write(dev, &reg, BQ32K_MINUTES, 1);
+ }
+ if (error)
+ return error;
+
+ rtc = rtc_device_register(bq32k_driver.driver.name, &client->dev,
+ &bq32k_rtc_ops, THIS_MODULE);
+ if (IS_ERR(rtc))
+ return PTR_ERR(rtc);
+
+ i2c_set_clientdata(client, rtc);
+
+ return 0;
+}
+
+static int __devexit bq32k_remove(struct i2c_client *client)
+{
+ struct rtc_device *rtc = i2c_get_clientdata(client);
+
+ rtc_device_unregister(rtc);
+ return 0;
+}
+
+static const struct i2c_device_id bq32k_id[] = {
+ { "bq32000", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, bq32k_id);
+
+static struct i2c_driver bq32k_driver = {
+ .driver = {
+ .name = "bq32k",
+ .owner = THIS_MODULE,
+ },
+ .probe = bq32k_probe,
+ .remove = __devexit_p(bq32k_remove),
+ .id_table = bq32k_id,
+};
+
+static __init int bq32k_init(void)
+{
+ return i2c_add_driver(&bq32k_driver);
+}
+module_init(bq32k_init);
+
+static __exit void bq32k_exit(void)
+{
+ i2c_del_driver(&bq32k_driver);
+}
+module_exit(bq32k_exit);
+
+MODULE_AUTHOR("Semihalf, Piotr Ziecik ");
+MODULE_DESCRIPTION("TI BQ32000 I2C RTC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-bq4802.c b/drivers/rtc/rtc-bq4802.c
index d00a274df8f..280fe48ada0 100644
--- a/drivers/rtc/rtc-bq4802.c
+++ b/drivers/rtc/rtc-bq4802.c
@@ -169,6 +169,8 @@ static int __devinit bq4802_probe(struct platform_device *pdev)
goto out_free;
}
+ platform_set_drvdata(pdev, p);
+
p->rtc = rtc_device_register("bq4802", &pdev->dev,
&bq4802_ops, THIS_MODULE);
if (IS_ERR(p->rtc)) {
@@ -176,7 +178,6 @@ static int __devinit bq4802_probe(struct platform_device *pdev)
goto out_iounmap;
}
- platform_set_drvdata(pdev, p);
err = 0;
out:
return err;
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index f7a4701bf86..eb154dc5716 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -420,49 +420,43 @@ static int cmos_irq_set_state(struct device *dev, int enabled)
return 0;
}
-#if defined(CONFIG_RTC_INTF_DEV) || defined(CONFIG_RTC_INTF_DEV_MODULE)
-
-static int
-cmos_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
+static int cmos_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
struct cmos_rtc *cmos = dev_get_drvdata(dev);
unsigned long flags;
- switch (cmd) {
- case RTC_AIE_OFF:
- case RTC_AIE_ON:
- case RTC_UIE_OFF:
- case RTC_UIE_ON:
- if (!is_valid_irq(cmos->irq))
- return -EINVAL;
- break;
- /* PIE ON/OFF is handled by cmos_irq_set_state() */
- default:
- return -ENOIOCTLCMD;
- }
+ if (!is_valid_irq(cmos->irq))
+ return -EINVAL;
spin_lock_irqsave(&rtc_lock, flags);
- switch (cmd) {
- case RTC_AIE_OFF: /* alarm off */
- cmos_irq_disable(cmos, RTC_AIE);
- break;
- case RTC_AIE_ON: /* alarm on */
+
+ if (enabled)
cmos_irq_enable(cmos, RTC_AIE);
- break;
- case RTC_UIE_OFF: /* update off */
- cmos_irq_disable(cmos, RTC_UIE);
- break;
- case RTC_UIE_ON: /* update on */
- cmos_irq_enable(cmos, RTC_UIE);
- break;
- }
+ else
+ cmos_irq_disable(cmos, RTC_AIE);
+
spin_unlock_irqrestore(&rtc_lock, flags);
return 0;
}
-#else
-#define cmos_rtc_ioctl NULL
-#endif
+static int cmos_update_irq_enable(struct device *dev, unsigned int enabled)
+{
+ struct cmos_rtc *cmos = dev_get_drvdata(dev);
+ unsigned long flags;
+
+ if (!is_valid_irq(cmos->irq))
+ return -EINVAL;
+
+ spin_lock_irqsave(&rtc_lock, flags);
+
+ if (enabled)
+ cmos_irq_enable(cmos, RTC_UIE);
+ else
+ cmos_irq_disable(cmos, RTC_UIE);
+
+ spin_unlock_irqrestore(&rtc_lock, flags);
+ return 0;
+}
#if defined(CONFIG_RTC_INTF_PROC) || defined(CONFIG_RTC_INTF_PROC_MODULE)
@@ -503,14 +497,15 @@ static int cmos_procfs(struct device *dev, struct seq_file *seq)
#endif
static const struct rtc_class_ops cmos_rtc_ops = {
- .ioctl = cmos_rtc_ioctl,
- .read_time = cmos_read_time,
- .set_time = cmos_set_time,
- .read_alarm = cmos_read_alarm,
- .set_alarm = cmos_set_alarm,
- .proc = cmos_procfs,
- .irq_set_freq = cmos_irq_set_freq,
- .irq_set_state = cmos_irq_set_state,
+ .read_time = cmos_read_time,
+ .set_time = cmos_set_time,
+ .read_alarm = cmos_read_alarm,
+ .set_alarm = cmos_set_alarm,
+ .proc = cmos_procfs,
+ .irq_set_freq = cmos_irq_set_freq,
+ .irq_set_state = cmos_irq_set_state,
+ .alarm_irq_enable = cmos_alarm_irq_enable,
+ .update_irq_enable = cmos_update_irq_enable,
};
/*----------------------------------------------------------------*/
@@ -871,8 +866,9 @@ static int cmos_suspend(struct device *dev, pm_message_t mesg)
mask = RTC_IRQMASK;
tmp &= ~mask;
CMOS_WRITE(tmp, RTC_CONTROL);
- hpet_mask_rtc_irq_bit(mask);
+ /* shut down hpet emulation - we don't need it for alarm */
+ hpet_mask_rtc_irq_bit(RTC_PIE|RTC_AIE|RTC_UIE);
cmos_checkintr(cmos, tmp);
}
spin_unlock_irq(&rtc_lock);
diff --git a/drivers/rtc/rtc-ds1302.c b/drivers/rtc/rtc-ds1302.c
index 1e73c8f42e3..532acf9b05d 100644
--- a/drivers/rtc/rtc-ds1302.c
+++ b/drivers/rtc/rtc-ds1302.c
@@ -143,7 +143,6 @@ static int ds1302_rtc_ioctl(struct device *dev, unsigned int cmd,
#ifdef RTC_SET_CHARGE
case RTC_SET_CHARGE:
{
- struct ds1302_rtc *rtc = dev_get_drvdata(dev);
int tcs_val;
if (copy_from_user(&tcs_val, (int __user *)arg, sizeof(int)))
diff --git a/drivers/rtc/rtc-ds1305.c b/drivers/rtc/rtc-ds1305.c
index 2736b11a1b1..259db7f3535 100644
--- a/drivers/rtc/rtc-ds1305.c
+++ b/drivers/rtc/rtc-ds1305.c
@@ -617,7 +617,6 @@ static struct bin_attribute nvram = {
static int __devinit ds1305_probe(struct spi_device *spi)
{
struct ds1305 *ds1305;
- struct rtc_device *rtc;
int status;
u8 addr, value;
struct ds1305_platform_data *pdata = spi->dev.platform_data;
@@ -756,14 +755,13 @@ static int __devinit ds1305_probe(struct spi_device *spi)
dev_dbg(&spi->dev, "AM/PM\n");
/* register RTC ... from here on, ds1305->ctrl needs locking */
- rtc = rtc_device_register("ds1305", &spi->dev,
+ ds1305->rtc = rtc_device_register("ds1305", &spi->dev,
&ds1305_ops, THIS_MODULE);
- if (IS_ERR(rtc)) {
- status = PTR_ERR(rtc);
+ if (IS_ERR(ds1305->rtc)) {
+ status = PTR_ERR(ds1305->rtc);
dev_dbg(&spi->dev, "register rtc --> %d\n", status);
goto fail0;
}
- ds1305->rtc = rtc;
/* Maybe set up alarm IRQ; be ready to handle it triggering right
* away. NOTE that we don't share this. The signal is active low,
@@ -774,7 +772,7 @@ static int __devinit ds1305_probe(struct spi_device *spi)
if (spi->irq) {
INIT_WORK(&ds1305->work, ds1305_work);
status = request_irq(spi->irq, ds1305_irq,
- 0, dev_name(&rtc->dev), ds1305);
+ 0, dev_name(&ds1305->rtc->dev), ds1305);
if (status < 0) {
dev_dbg(&spi->dev, "request_irq %d --> %d\n",
spi->irq, status);
@@ -794,7 +792,7 @@ static int __devinit ds1305_probe(struct spi_device *spi)
fail2:
free_irq(spi->irq, ds1305);
fail1:
- rtc_device_unregister(rtc);
+ rtc_device_unregister(ds1305->rtc);
fail0:
kfree(ds1305);
return status;
@@ -802,7 +800,7 @@ fail0:
static int __devexit ds1305_remove(struct spi_device *spi)
{
- struct ds1305 *ds1305 = spi_get_drvdata(spi);
+ struct ds1305 *ds1305 = spi_get_drvdata(spi);
sysfs_remove_bin_file(&spi->dev.kobj, &nvram);
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index eb99ee4fa0f..8a99da6f2f2 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -874,7 +874,7 @@ read_rtc:
}
if (want_irq) {
- err = request_irq(client->irq, ds1307_irq, 0,
+ err = request_irq(client->irq, ds1307_irq, IRQF_SHARED,
ds1307->rtc->name, client);
if (err) {
dev_err(&client->dev,
diff --git a/drivers/rtc/rtc-ds1511.c b/drivers/rtc/rtc-ds1511.c
index 539676e25fd..4166b84cb51 100644
--- a/drivers/rtc/rtc-ds1511.c
+++ b/drivers/rtc/rtc-ds1511.c
@@ -87,7 +87,6 @@ enum ds1511reg {
struct rtc_plat_data {
struct rtc_device *rtc;
void __iomem *ioaddr; /* virtual base address */
- unsigned long baseaddr; /* physical base address */
int size; /* amount of memory mapped */
int irq;
unsigned int irqen;
@@ -95,6 +94,7 @@ struct rtc_plat_data {
int alrm_min;
int alrm_hour;
int alrm_mday;
+ spinlock_t lock;
};
static DEFINE_SPINLOCK(ds1511_lock);
@@ -302,7 +302,7 @@ ds1511_rtc_update_alarm(struct rtc_plat_data *pdata)
{
unsigned long flags;
- spin_lock_irqsave(&pdata->rtc->irq_lock, flags);
+ spin_lock_irqsave(&pdata->lock, flags);
rtc_write(pdata->alrm_mday < 0 || (pdata->irqen & RTC_UF) ?
0x80 : bin2bcd(pdata->alrm_mday) & 0x3f,
RTC_ALARM_DATE);
@@ -317,7 +317,7 @@ ds1511_rtc_update_alarm(struct rtc_plat_data *pdata)
RTC_ALARM_SEC);
rtc_write(rtc_read(RTC_CMD) | (pdata->irqen ? RTC_TIE : 0), RTC_CMD);
rtc_read(RTC_CMD1); /* clear interrupts */
- spin_unlock_irqrestore(&pdata->rtc->irq_lock, flags);
+ spin_unlock_irqrestore(&pdata->lock, flags);
}
static int
@@ -362,61 +362,63 @@ ds1511_interrupt(int irq, void *dev_id)
{
struct platform_device *pdev = dev_id;
struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
- unsigned long events = RTC_IRQF;
+ unsigned long events = 0;
+ spin_lock(&pdata->lock);
/*
* read and clear interrupt
*/
- if (!(rtc_read(RTC_CMD1) & DS1511_IRQF)) {
- return IRQ_NONE;
+ if (rtc_read(RTC_CMD1) & DS1511_IRQF) {
+ events = RTC_IRQF;
+ if (rtc_read(RTC_ALARM_SEC) & 0x80)
+ events |= RTC_UF;
+ else
+ events |= RTC_AF;
+ if (likely(pdata->rtc))
+ rtc_update_irq(pdata->rtc, 1, events);
}
- if (rtc_read(RTC_ALARM_SEC) & 0x80) {
- events |= RTC_UF;
- } else {
- events |= RTC_AF;
- }
- rtc_update_irq(pdata->rtc, 1, events);
- return IRQ_HANDLED;
+ spin_unlock(&pdata->lock);
+ return events ? IRQ_HANDLED : IRQ_NONE;
}
- static int
-ds1511_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
+static int ds1511_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
struct platform_device *pdev = to_platform_device(dev);
struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
- if (pdata->irq <= 0) {
- return -ENOIOCTLCMD; /* fall back into rtc-dev's emulation */
- }
- switch (cmd) {
- case RTC_AIE_OFF:
- pdata->irqen &= ~RTC_AF;
- ds1511_rtc_update_alarm(pdata);
- break;
- case RTC_AIE_ON:
+ if (pdata->irq <= 0)
+ return -EINVAL;
+ if (enabled)
pdata->irqen |= RTC_AF;
- ds1511_rtc_update_alarm(pdata);
- break;
- case RTC_UIE_OFF:
- pdata->irqen &= ~RTC_UF;
- ds1511_rtc_update_alarm(pdata);
- break;
- case RTC_UIE_ON:
+ else
+ pdata->irqen &= ~RTC_AF;
+ ds1511_rtc_update_alarm(pdata);
+ return 0;
+}
+
+static int ds1511_rtc_update_irq_enable(struct device *dev,
+ unsigned int enabled)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
+
+ if (pdata->irq <= 0)
+ return -EINVAL;
+ if (enabled)
pdata->irqen |= RTC_UF;
- ds1511_rtc_update_alarm(pdata);
- break;
- default:
- return -ENOIOCTLCMD;
- }
+ else
+ pdata->irqen &= ~RTC_UF;
+ ds1511_rtc_update_alarm(pdata);
return 0;
}
static const struct rtc_class_ops ds1511_rtc_ops = {
- .read_time = ds1511_rtc_read_time,
- .set_time = ds1511_rtc_set_time,
- .read_alarm = ds1511_rtc_read_alarm,
- .set_alarm = ds1511_rtc_set_alarm,
- .ioctl = ds1511_rtc_ioctl,
+ .read_time = ds1511_rtc_read_time,
+ .set_time = ds1511_rtc_set_time,
+ .read_alarm = ds1511_rtc_read_alarm,
+ .set_alarm = ds1511_rtc_set_alarm,
+ .alarm_irq_enable = ds1511_rtc_alarm_irq_enable,
+ .update_irq_enable = ds1511_rtc_update_irq_enable,
};
static ssize_t
@@ -492,29 +494,23 @@ ds1511_rtc_probe(struct platform_device *pdev)
{
struct rtc_device *rtc;
struct resource *res;
- struct rtc_plat_data *pdata = NULL;
+ struct rtc_plat_data *pdata;
int ret = 0;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
return -ENODEV;
}
- pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
- if (!pdata) {
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
return -ENOMEM;
- }
pdata->size = res->end - res->start + 1;
- if (!request_mem_region(res->start, pdata->size, pdev->name)) {
- ret = -EBUSY;
- goto out;
- }
- pdata->baseaddr = res->start;
- pdata->size = pdata->size;
- ds1511_base = ioremap(pdata->baseaddr, pdata->size);
- if (!ds1511_base) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!devm_request_mem_region(&pdev->dev, res->start, pdata->size,
+ pdev->name))
+ return -EBUSY;
+ ds1511_base = devm_ioremap(&pdev->dev, res->start, pdata->size);
+ if (!ds1511_base)
+ return -ENOMEM;
pdata->ioaddr = ds1511_base;
pdata->irq = platform_get_irq(pdev, 0);
@@ -540,13 +536,15 @@ ds1511_rtc_probe(struct platform_device *pdev)
dev_warn(&pdev->dev, "voltage-low detected.\n");
}
+ spin_lock_init(&pdata->lock);
+ platform_set_drvdata(pdev, pdata);
/*
* if the platform has an interrupt in mind for this device,
* then by all means, set it
*/
if (pdata->irq > 0) {
rtc_read(RTC_CMD1);
- if (request_irq(pdata->irq, ds1511_interrupt,
+ if (devm_request_irq(&pdev->dev, pdata->irq, ds1511_interrupt,
IRQF_DISABLED | IRQF_SHARED, pdev->name, pdev) < 0) {
dev_warn(&pdev->dev, "interrupt not available.\n");
@@ -556,33 +554,13 @@ ds1511_rtc_probe(struct platform_device *pdev)
rtc = rtc_device_register(pdev->name, &pdev->dev, &ds1511_rtc_ops,
THIS_MODULE);
- if (IS_ERR(rtc)) {
- ret = PTR_ERR(rtc);
- goto out;
- }
+ if (IS_ERR(rtc))
+ return PTR_ERR(rtc);
pdata->rtc = rtc;
- platform_set_drvdata(pdev, pdata);
- ret = sysfs_create_bin_file(&pdev->dev.kobj, &ds1511_nvram_attr);
- if (ret) {
- goto out;
- }
- return 0;
- out:
- if (pdata->rtc) {
- rtc_device_unregister(pdata->rtc);
- }
- if (pdata->irq > 0) {
- free_irq(pdata->irq, pdev);
- }
- if (ds1511_base) {
- iounmap(ds1511_base);
- ds1511_base = NULL;
- }
- if (pdata->baseaddr) {
- release_mem_region(pdata->baseaddr, pdata->size);
- }
- kfree(pdata);
+ ret = sysfs_create_bin_file(&pdev->dev.kobj, &ds1511_nvram_attr);
+ if (ret)
+ rtc_device_unregister(pdata->rtc);
return ret;
}
@@ -593,19 +571,13 @@ ds1511_rtc_remove(struct platform_device *pdev)
sysfs_remove_bin_file(&pdev->dev.kobj, &ds1511_nvram_attr);
rtc_device_unregister(pdata->rtc);
- pdata->rtc = NULL;
if (pdata->irq > 0) {
/*
* disable the alarm interrupt
*/
rtc_write(rtc_read(RTC_CMD) & ~RTC_TIE, RTC_CMD);
rtc_read(RTC_CMD1);
- free_irq(pdata->irq, pdev);
}
- iounmap(pdata->ioaddr);
- ds1511_base = NULL;
- release_mem_region(pdata->baseaddr, pdata->size);
- kfree(pdata);
return 0;
}
diff --git a/drivers/rtc/rtc-ds1553.c b/drivers/rtc/rtc-ds1553.c
index 717288527c6..ed1ef7c9cc0 100644
--- a/drivers/rtc/rtc-ds1553.c
+++ b/drivers/rtc/rtc-ds1553.c
@@ -18,7 +18,7 @@
#include
#include
-#define DRV_VERSION "0.2"
+#define DRV_VERSION "0.3"
#define RTC_REG_SIZE 0x2000
#define RTC_OFFSET 0x1ff0
@@ -61,7 +61,6 @@
struct rtc_plat_data {
struct rtc_device *rtc;
void __iomem *ioaddr;
- resource_size_t baseaddr;
unsigned long last_jiffies;
int irq;
unsigned int irqen;
@@ -69,6 +68,7 @@ struct rtc_plat_data {
int alrm_min;
int alrm_hour;
int alrm_mday;
+ spinlock_t lock;
};
static int ds1553_rtc_set_time(struct device *dev, struct rtc_time *tm)
@@ -139,7 +139,7 @@ static void ds1553_rtc_update_alarm(struct rtc_plat_data *pdata)
void __iomem *ioaddr = pdata->ioaddr;
unsigned long flags;
- spin_lock_irqsave(&pdata->rtc->irq_lock, flags);
+ spin_lock_irqsave(&pdata->lock, flags);
writeb(pdata->alrm_mday < 0 || (pdata->irqen & RTC_UF) ?
0x80 : bin2bcd(pdata->alrm_mday),
ioaddr + RTC_DATE_ALARM);
@@ -154,7 +154,7 @@ static void ds1553_rtc_update_alarm(struct rtc_plat_data *pdata)
ioaddr + RTC_SECONDS_ALARM);
writeb(pdata->irqen ? RTC_INTS_AE : 0, ioaddr + RTC_INTERRUPTS);
readb(ioaddr + RTC_FLAGS); /* clear interrupts */
- spin_unlock_irqrestore(&pdata->rtc->irq_lock, flags);
+ spin_unlock_irqrestore(&pdata->lock, flags);
}
static int ds1553_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
@@ -194,64 +194,69 @@ static irqreturn_t ds1553_rtc_interrupt(int irq, void *dev_id)
struct platform_device *pdev = dev_id;
struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
void __iomem *ioaddr = pdata->ioaddr;
- unsigned long events = RTC_IRQF;
+ unsigned long events = 0;
+ spin_lock(&pdata->lock);
/* read and clear interrupt */
- if (!(readb(ioaddr + RTC_FLAGS) & RTC_FLAGS_AF))
- return IRQ_NONE;
- if (readb(ioaddr + RTC_SECONDS_ALARM) & 0x80)
- events |= RTC_UF;
- else
- events |= RTC_AF;
- rtc_update_irq(pdata->rtc, 1, events);
- return IRQ_HANDLED;
+ if (readb(ioaddr + RTC_FLAGS) & RTC_FLAGS_AF) {
+ events = RTC_IRQF;
+ if (readb(ioaddr + RTC_SECONDS_ALARM) & 0x80)
+ events |= RTC_UF;
+ else
+ events |= RTC_AF;
+ if (likely(pdata->rtc))
+ rtc_update_irq(pdata->rtc, 1, events);
+ }
+ spin_unlock(&pdata->lock);
+ return events ? IRQ_HANDLED : IRQ_NONE;
}
-static int ds1553_rtc_ioctl(struct device *dev, unsigned int cmd,
- unsigned long arg)
+static int ds1553_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
struct platform_device *pdev = to_platform_device(dev);
struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
if (pdata->irq <= 0)
- return -ENOIOCTLCMD; /* fall back into rtc-dev's emulation */
- switch (cmd) {
- case RTC_AIE_OFF:
- pdata->irqen &= ~RTC_AF;
- ds1553_rtc_update_alarm(pdata);
- break;
- case RTC_AIE_ON:
+ return -EINVAL;
+ if (enabled)
pdata->irqen |= RTC_AF;
- ds1553_rtc_update_alarm(pdata);
- break;
- case RTC_UIE_OFF:
- pdata->irqen &= ~RTC_UF;
- ds1553_rtc_update_alarm(pdata);
- break;
- case RTC_UIE_ON:
+ else
+ pdata->irqen &= ~RTC_AF;
+ ds1553_rtc_update_alarm(pdata);
+ return 0;
+}
+
+static int ds1553_rtc_update_irq_enable(struct device *dev,
+ unsigned int enabled)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
+
+ if (pdata->irq <= 0)
+ return -EINVAL;
+ if (enabled)
pdata->irqen |= RTC_UF;
- ds1553_rtc_update_alarm(pdata);
- break;
- default:
- return -ENOIOCTLCMD;
- }
+ else
+ pdata->irqen &= ~RTC_UF;
+ ds1553_rtc_update_alarm(pdata);
return 0;
}
static const struct rtc_class_ops ds1553_rtc_ops = {
- .read_time = ds1553_rtc_read_time,
- .set_time = ds1553_rtc_set_time,
- .read_alarm = ds1553_rtc_read_alarm,
- .set_alarm = ds1553_rtc_set_alarm,
- .ioctl = ds1553_rtc_ioctl,
+ .read_time = ds1553_rtc_read_time,
+ .set_time = ds1553_rtc_set_time,
+ .read_alarm = ds1553_rtc_read_alarm,
+ .set_alarm = ds1553_rtc_set_alarm,
+ .alarm_irq_enable = ds1553_rtc_alarm_irq_enable,
+ .update_irq_enable = ds1553_rtc_update_irq_enable,
};
static ssize_t ds1553_nvram_read(struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t pos, size_t size)
{
- struct platform_device *pdev =
- to_platform_device(container_of(kobj, struct device, kobj));
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct platform_device *pdev = to_platform_device(dev);
struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
void __iomem *ioaddr = pdata->ioaddr;
ssize_t count;
@@ -265,8 +270,8 @@ static ssize_t ds1553_nvram_write(struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t pos, size_t size)
{
- struct platform_device *pdev =
- to_platform_device(container_of(kobj, struct device, kobj));
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct platform_device *pdev = to_platform_device(dev);
struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
void __iomem *ioaddr = pdata->ioaddr;
ssize_t count;
@@ -291,26 +296,23 @@ static int __devinit ds1553_rtc_probe(struct platform_device *pdev)
struct rtc_device *rtc;
struct resource *res;
unsigned int cen, sec;
- struct rtc_plat_data *pdata = NULL;
- void __iomem *ioaddr = NULL;
+ struct rtc_plat_data *pdata;
+ void __iomem *ioaddr;
int ret = 0;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENODEV;
- pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
- if (!request_mem_region(res->start, RTC_REG_SIZE, pdev->name)) {
- ret = -EBUSY;
- goto out;
- }
- pdata->baseaddr = res->start;
- ioaddr = ioremap(pdata->baseaddr, RTC_REG_SIZE);
- if (!ioaddr) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!devm_request_mem_region(&pdev->dev, res->start, RTC_REG_SIZE,
+ pdev->name))
+ return -EBUSY;
+
+ ioaddr = devm_ioremap(&pdev->dev, res->start, RTC_REG_SIZE);
+ if (!ioaddr)
+ return -ENOMEM;
pdata->ioaddr = ioaddr;
pdata->irq = platform_get_irq(pdev, 0);
@@ -326,9 +328,13 @@ static int __devinit ds1553_rtc_probe(struct platform_device *pdev)
if (readb(ioaddr + RTC_FLAGS) & RTC_FLAGS_BLF)
dev_warn(&pdev->dev, "voltage-low detected.\n");
+ spin_lock_init(&pdata->lock);
+ pdata->last_jiffies = jiffies;
+ platform_set_drvdata(pdev, pdata);
if (pdata->irq > 0) {
writeb(0, ioaddr + RTC_INTERRUPTS);
- if (request_irq(pdata->irq, ds1553_rtc_interrupt,
+ if (devm_request_irq(&pdev->dev, pdata->irq,
+ ds1553_rtc_interrupt,
IRQF_DISABLED, pdev->name, pdev) < 0) {
dev_warn(&pdev->dev, "interrupt not available.\n");
pdata->irq = 0;
@@ -337,27 +343,13 @@ static int __devinit ds1553_rtc_probe(struct platform_device *pdev)
rtc = rtc_device_register(pdev->name, &pdev->dev,
&ds1553_rtc_ops, THIS_MODULE);
- if (IS_ERR(rtc)) {
- ret = PTR_ERR(rtc);
- goto out;
- }
+ if (IS_ERR(rtc))
+ return PTR_ERR(rtc);
pdata->rtc = rtc;
- pdata->last_jiffies = jiffies;
- platform_set_drvdata(pdev, pdata);
+
ret = sysfs_create_bin_file(&pdev->dev.kobj, &ds1553_nvram_attr);
if (ret)
- goto out;
- return 0;
- out:
- if (pdata->rtc)
- rtc_device_unregister(pdata->rtc);
- if (pdata->irq > 0)
- free_irq(pdata->irq, pdev);
- if (ioaddr)
- iounmap(ioaddr);
- if (pdata->baseaddr)
- release_mem_region(pdata->baseaddr, RTC_REG_SIZE);
- kfree(pdata);
+ rtc_device_unregister(rtc);
return ret;
}
@@ -367,13 +359,8 @@ static int __devexit ds1553_rtc_remove(struct platform_device *pdev)
sysfs_remove_bin_file(&pdev->dev.kobj, &ds1553_nvram_attr);
rtc_device_unregister(pdata->rtc);
- if (pdata->irq > 0) {
+ if (pdata->irq > 0)
writeb(0, pdata->ioaddr + RTC_INTERRUPTS);
- free_irq(pdata->irq, pdev);
- }
- iounmap(pdata->ioaddr);
- release_mem_region(pdata->baseaddr, RTC_REG_SIZE);
- kfree(pdata);
return 0;
}
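
With ds1553_rtc_update_irq_enable() and ds1553_rtc_alarm_irq_enable() in place, the RTC_UIE_ON/RTC_AIE_ON ioctls are handled by the rtc-dev core, which calls the new callbacks instead of the driver's old .ioctl handler. A minimal userspace sketch of that path (the device node /dev/rtc0 is an assumption):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/rtc.h>

int main(void)
{
	unsigned long data;
	int fd = open("/dev/rtc0", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/rtc0");
		return 1;
	}

	/* rtc-dev turns this into ops->update_irq_enable(dev, 1) */
	if (ioctl(fd, RTC_UIE_ON, 0) < 0)
		perror("RTC_UIE_ON");
	else if (read(fd, &data, sizeof(data)) == sizeof(data))
		/* low byte holds the RTC_UF flag, the rest the count */
		printf("got %lu update interrupt(s)\n", data >> 8);

	ioctl(fd, RTC_UIE_OFF, 0);
	close(fd);
	return 0;
}
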
diff --git a/drivers/rtc/rtc-ds1742.c b/drivers/rtc/rtc-ds1742.c
index 09249459e9a..a1273360a44 100644
--- a/drivers/rtc/rtc-ds1742.c
+++ b/drivers/rtc/rtc-ds1742.c
@@ -21,7 +21,7 @@
#include
#include
-#define DRV_VERSION "0.3"
+#define DRV_VERSION "0.4"
#define RTC_SIZE 8
@@ -55,7 +55,6 @@ struct rtc_plat_data {
void __iomem *ioaddr_rtc;
size_t size_nvram;
size_t size;
- resource_size_t baseaddr;
unsigned long last_jiffies;
struct bin_attribute nvram_attr;
};
@@ -132,8 +131,8 @@ static ssize_t ds1742_nvram_read(struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t pos, size_t size)
{
- struct platform_device *pdev =
- to_platform_device(container_of(kobj, struct device, kobj));
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct platform_device *pdev = to_platform_device(dev);
struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
void __iomem *ioaddr = pdata->ioaddr_nvram;
ssize_t count;
@@ -147,8 +146,8 @@ static ssize_t ds1742_nvram_write(struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t pos, size_t size)
{
- struct platform_device *pdev =
- to_platform_device(container_of(kobj, struct device, kobj));
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct platform_device *pdev = to_platform_device(dev);
struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
void __iomem *ioaddr = pdata->ioaddr_nvram;
ssize_t count;
@@ -163,27 +162,24 @@ static int __devinit ds1742_rtc_probe(struct platform_device *pdev)
struct rtc_device *rtc;
struct resource *res;
unsigned int cen, sec;
- struct rtc_plat_data *pdata = NULL;
- void __iomem *ioaddr = NULL;
+ struct rtc_plat_data *pdata;
+ void __iomem *ioaddr;
int ret = 0;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENODEV;
- pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
pdata->size = res->end - res->start + 1;
- if (!request_mem_region(res->start, pdata->size, pdev->name)) {
- ret = -EBUSY;
- goto out;
- }
- pdata->baseaddr = res->start;
- ioaddr = ioremap(pdata->baseaddr, pdata->size);
- if (!ioaddr) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!devm_request_mem_region(&pdev->dev, res->start, pdata->size,
+ pdev->name))
+ return -EBUSY;
+ ioaddr = devm_ioremap(&pdev->dev, res->start, pdata->size);
+ if (!ioaddr)
+ return -ENOMEM;
+
pdata->ioaddr_nvram = ioaddr;
pdata->size_nvram = pdata->size - RTC_SIZE;
pdata->ioaddr_rtc = ioaddr + pdata->size_nvram;
@@ -207,31 +203,19 @@ static int __devinit ds1742_rtc_probe(struct platform_device *pdev)
if (!(readb(ioaddr + RTC_DAY) & RTC_BATT_FLAG))
dev_warn(&pdev->dev, "voltage-low detected.\n");
- rtc = rtc_device_register(pdev->name, &pdev->dev,
- &ds1742_rtc_ops, THIS_MODULE);
- if (IS_ERR(rtc)) {
- ret = PTR_ERR(rtc);
- goto out;
- }
- pdata->rtc = rtc;
pdata->last_jiffies = jiffies;
platform_set_drvdata(pdev, pdata);
+ rtc = rtc_device_register(pdev->name, &pdev->dev,
+ &ds1742_rtc_ops, THIS_MODULE);
+ if (IS_ERR(rtc))
+ return PTR_ERR(rtc);
+ pdata->rtc = rtc;
ret = sysfs_create_bin_file(&pdev->dev.kobj, &pdata->nvram_attr);
if (ret) {
dev_err(&pdev->dev, "creating nvram file in sysfs failed\n");
- goto out;
+ rtc_device_unregister(rtc);
}
-
- return 0;
- out:
- if (pdata->rtc)
- rtc_device_unregister(pdata->rtc);
- if (pdata->ioaddr_nvram)
- iounmap(pdata->ioaddr_nvram);
- if (pdata->baseaddr)
- release_mem_region(pdata->baseaddr, pdata->size);
- kfree(pdata);
return ret;
}
@@ -241,9 +225,6 @@ static int __devexit ds1742_rtc_remove(struct platform_device *pdev)
sysfs_remove_bin_file(&pdev->dev.kobj, &pdata->nvram_attr);
rtc_device_unregister(pdata->rtc);
- iounmap(pdata->ioaddr_nvram);
- release_mem_region(pdata->baseaddr, pdata->size);
- kfree(pdata);
return 0;
}
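
Both the ds1553 and ds1742 probes above move to managed (devm_*) resources, so the error paths and remove routines no longer free anything by hand. A generic sketch of that pattern, with illustrative foo_* names that are not taken from the patch and IRQ flags omitted:

#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/io.h>

struct foo_priv {
	void __iomem *base;
	int irq;
};

static irqreturn_t foo_irq(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int foo_probe(struct platform_device *pdev)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	struct foo_priv *priv;

	if (!res)
		return -ENODEV;
	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	if (!devm_request_mem_region(&pdev->dev, res->start,
				     resource_size(res), pdev->name))
		return -EBUSY;
	priv->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (!priv->base)
		return -ENOMEM;

	priv->irq = platform_get_irq(pdev, 0);
	if (priv->irq > 0 &&
	    devm_request_irq(&pdev->dev, priv->irq, foo_irq, 0,
			     pdev->name, pdev) < 0)
		priv->irq = 0;	/* run without interrupts, as the drivers above do */

	platform_set_drvdata(pdev, priv);
	return 0;	/* nothing to undo: devm releases everything on detach */
}
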
diff --git a/drivers/rtc/rtc-m48t35.c b/drivers/rtc/rtc-m48t35.c
index 0b219755994..8cb5b8959e5 100644
--- a/drivers/rtc/rtc-m48t35.c
+++ b/drivers/rtc/rtc-m48t35.c
@@ -142,7 +142,6 @@ static const struct rtc_class_ops m48t35_ops = {
static int __devinit m48t35_probe(struct platform_device *pdev)
{
- struct rtc_device *rtc;
struct resource *res;
struct m48t35_priv *priv;
int ret = 0;
@@ -171,20 +170,21 @@ static int __devinit m48t35_probe(struct platform_device *pdev)
ret = -ENOMEM;
goto out;
}
+
spin_lock_init(&priv->lock);
- rtc = rtc_device_register("m48t35", &pdev->dev,
+
+ platform_set_drvdata(pdev, priv);
+
+ priv->rtc = rtc_device_register("m48t35", &pdev->dev,
&m48t35_ops, THIS_MODULE);
- if (IS_ERR(rtc)) {
- ret = PTR_ERR(rtc);
+ if (IS_ERR(priv->rtc)) {
+ ret = PTR_ERR(priv->rtc);
goto out;
}
- priv->rtc = rtc;
- platform_set_drvdata(pdev, priv);
+
return 0;
out:
- if (priv->rtc)
- rtc_device_unregister(priv->rtc);
if (priv->reg)
iounmap(priv->reg);
if (priv->baseaddr)
diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
index 33921a6b170..ede43b84685 100644
--- a/drivers/rtc/rtc-m48t59.c
+++ b/drivers/rtc/rtc-m48t59.c
@@ -481,6 +481,9 @@ static int __devinit m48t59_rtc_probe(struct platform_device *pdev)
goto out;
}
+ spin_lock_init(&m48t59->lock);
+ platform_set_drvdata(pdev, m48t59);
+
m48t59->rtc = rtc_device_register(name, &pdev->dev, ops, THIS_MODULE);
if (IS_ERR(m48t59->rtc)) {
ret = PTR_ERR(m48t59->rtc);
@@ -490,16 +493,14 @@ static int __devinit m48t59_rtc_probe(struct platform_device *pdev)
m48t59_nvram_attr.size = pdata->offset;
ret = sysfs_create_bin_file(&pdev->dev.kobj, &m48t59_nvram_attr);
- if (ret)
+ if (ret) {
+ rtc_device_unregister(m48t59->rtc);
goto out;
+ }
- spin_lock_init(&m48t59->lock);
- platform_set_drvdata(pdev, m48t59);
return 0;
out:
- if (!IS_ERR(m48t59->rtc))
- rtc_device_unregister(m48t59->rtc);
if (m48t59->irq != NO_IRQ)
free_irq(m48t59->irq, &pdev->dev);
if (m48t59->ioaddr)
diff --git a/drivers/rtc/rtc-mc13783.c b/drivers/rtc/rtc-mc13783.c
new file mode 100644
index 00000000000..850f983c039
--- /dev/null
+++ b/drivers/rtc/rtc-mc13783.c
@@ -0,0 +1,262 @@
+/*
+ * Real Time Clock driver for Freescale MC13783 PMIC
+ *
+ * (C) 2009 Sascha Hauer, Pengutronix
+ * (C) 2009 Uwe Kleine-Koenig, Pengutronix
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include
+#include
+#include
+#include
+#include
+
+#define DRIVER_NAME "mc13783-rtc"
+
+#define MC13783_RTCTOD 20
+#define MC13783_RTCTODA 21
+#define MC13783_RTCDAY 22
+#define MC13783_RTCDAYA 23
+
+struct mc13783_rtc {
+ struct rtc_device *rtc;
+ struct mc13783 *mc13783;
+ int valid;
+};
+
+static int mc13783_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct mc13783_rtc *priv = dev_get_drvdata(dev);
+ unsigned int seconds, days1, days2;
+ unsigned long s1970;
+ int ret;
+
+ mc13783_lock(priv->mc13783);
+
+ if (!priv->valid) {
+ ret = -ENODATA;
+ goto out;
+ }
+
+ ret = mc13783_reg_read(priv->mc13783, MC13783_RTCDAY, &days1);
+ if (unlikely(ret))
+ goto out;
+
+ ret = mc13783_reg_read(priv->mc13783, MC13783_RTCTOD, &seconds);
+ if (unlikely(ret))
+ goto out;
+
+ ret = mc13783_reg_read(priv->mc13783, MC13783_RTCDAY, &days2);
+out:
+ mc13783_unlock(priv->mc13783);
+
+ if (ret)
+ return ret;
+
+ if (days2 == days1 + 1) {
+ if (seconds >= 86400 / 2)
+ days2 = days1;
+ else
+ days1 = days2;
+ }
+
+ if (days1 != days2)
+ return -EIO;
+
+ s1970 = days1 * 86400 + seconds;
+
+ rtc_time_to_tm(s1970, tm);
+
+ return rtc_valid_tm(tm);
+}
+
+static int mc13783_rtc_set_mmss(struct device *dev, unsigned long secs)
+{
+ struct mc13783_rtc *priv = dev_get_drvdata(dev);
+ unsigned int seconds, days;
+ int ret;
+
+ seconds = secs % 86400;
+ days = secs / 86400;
+
+ mc13783_lock(priv->mc13783);
+
+ /*
+ * first write seconds=0 to prevent a day switch between writing days
+ * and seconds below
+ */
+ ret = mc13783_reg_write(priv->mc13783, MC13783_RTCTOD, 0);
+ if (unlikely(ret))
+ goto out;
+
+ ret = mc13783_reg_write(priv->mc13783, MC13783_RTCDAY, days);
+ if (unlikely(ret))
+ goto out;
+
+ ret = mc13783_reg_write(priv->mc13783, MC13783_RTCTOD, seconds);
+ if (unlikely(ret))
+ goto out;
+
+ ret = mc13783_ackirq(priv->mc13783, MC13783_IRQ_RTCRST);
+ if (unlikely(ret))
+ goto out;
+
+ ret = mc13783_unmask(priv->mc13783, MC13783_IRQ_RTCRST);
+out:
+ priv->valid = !ret;
+
+ mc13783_unlock(priv->mc13783);
+
+ return ret;
+}
+
+static irqreturn_t mc13783_rtc_update_handler(int irq, void *dev)
+{
+ struct mc13783_rtc *priv = dev;
+ struct mc13783 *mc13783 = priv->mc13783;
+
+ dev_dbg(&priv->rtc->dev, "1HZ\n");
+
+ rtc_update_irq(priv->rtc, 1, RTC_IRQF | RTC_UF);
+
+ mc13783_ackirq(mc13783, irq);
+
+ return IRQ_HANDLED;
+}
+
+static int mc13783_rtc_update_irq_enable(struct device *dev,
+ unsigned int enabled)
+{
+ struct mc13783_rtc *priv = dev_get_drvdata(dev);
+ int ret = -ENODATA;
+
+ mc13783_lock(priv->mc13783);
+ if (!priv->valid)
+ goto out;
+
+ ret = (enabled ? mc13783_unmask : mc13783_mask)(priv->mc13783,
+ MC13783_IRQ_1HZ);
+out:
+ mc13783_unlock(priv->mc13783);
+
+ return ret;
+}
+
+static const struct rtc_class_ops mc13783_rtc_ops = {
+ .read_time = mc13783_rtc_read_time,
+ .set_mmss = mc13783_rtc_set_mmss,
+ .update_irq_enable = mc13783_rtc_update_irq_enable,
+};
+
+static irqreturn_t mc13783_rtc_reset_handler(int irq, void *dev)
+{
+ struct mc13783_rtc *priv = dev;
+ struct mc13783 *mc13783 = priv->mc13783;
+
+ dev_dbg(&priv->rtc->dev, "RTCRST\n");
+ priv->valid = 0;
+
+ mc13783_mask(mc13783, irq);
+
+ return IRQ_HANDLED;
+}
+
+static int __devinit mc13783_rtc_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct mc13783_rtc *priv;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->mc13783 = dev_get_drvdata(pdev->dev.parent);
+ platform_set_drvdata(pdev, priv);
+
+ priv->valid = 1;
+
+ mc13783_lock(priv->mc13783);
+
+ ret = mc13783_irq_request(priv->mc13783, MC13783_IRQ_RTCRST,
+ mc13783_rtc_reset_handler, DRIVER_NAME, priv);
+ if (ret)
+ goto err_reset_irq_request;
+
+ ret = mc13783_irq_request_nounmask(priv->mc13783, MC13783_IRQ_1HZ,
+ mc13783_rtc_update_handler, DRIVER_NAME, priv);
+ if (ret)
+ goto err_update_irq_request;
+
+ mc13783_unlock(priv->mc13783);
+
+ priv->rtc = rtc_device_register(pdev->name,
+ &pdev->dev, &mc13783_rtc_ops, THIS_MODULE);
+
+ if (IS_ERR(priv->rtc)) {
+ ret = PTR_ERR(priv->rtc);
+
+ mc13783_lock(priv->mc13783);
+
+ mc13783_irq_free(priv->mc13783, MC13783_IRQ_1HZ, priv);
+err_update_irq_request:
+
+ mc13783_irq_free(priv->mc13783, MC13783_IRQ_RTCRST, priv);
+err_reset_irq_request:
+
+ mc13783_unlock(priv->mc13783);
+
+ platform_set_drvdata(pdev, NULL);
+ kfree(priv);
+ }
+
+ return ret;
+}
+
+static int __devexit mc13783_rtc_remove(struct platform_device *pdev)
+{
+ struct mc13783_rtc *priv = platform_get_drvdata(pdev);
+
+ rtc_device_unregister(priv->rtc);
+
+ mc13783_lock(priv->mc13783);
+
+ mc13783_irq_free(priv->mc13783, MC13783_IRQ_1HZ, priv);
+ mc13783_irq_free(priv->mc13783, MC13783_IRQ_RTCRST, priv);
+
+ mc13783_unlock(priv->mc13783);
+
+ platform_set_drvdata(pdev, NULL);
+
+ kfree(priv);
+
+ return 0;
+}
+
+static struct platform_driver mc13783_rtc_driver = {
+ .remove = __devexit_p(mc13783_rtc_remove),
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init mc13783_rtc_init(void)
+{
+ return platform_driver_probe(&mc13783_rtc_driver, &mc13783_rtc_probe);
+}
+module_init(mc13783_rtc_init);
+
+static void __exit mc13783_rtc_exit(void)
+{
+ platform_driver_unregister(&mc13783_rtc_driver);
+}
+module_exit(mc13783_rtc_exit);
+
+MODULE_AUTHOR("Sascha Hauer ");
+MODULE_DESCRIPTION("RTC driver for Freescale MC13783 PMIC");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/rtc/rtc-mv.c b/drivers/rtc/rtc-mv.c
index e0263d2005e..dc052ce6e63 100644
--- a/drivers/rtc/rtc-mv.c
+++ b/drivers/rtc/rtc-mv.c
@@ -27,10 +27,17 @@
#define RTC_MONTH_OFFS 8
#define RTC_YEAR_OFFS 16
+#define RTC_ALARM_TIME_REG_OFFS 8
+#define RTC_ALARM_DATE_REG_OFFS 0xc
+#define RTC_ALARM_VALID (1 << 7)
+
+#define RTC_ALARM_INTERRUPT_MASK_REG_OFFS 0x10
+#define RTC_ALARM_INTERRUPT_CASUE_REG_OFFS 0x14
struct rtc_plat_data {
struct rtc_device *rtc;
void __iomem *ioaddr;
+ int irq;
};
static int mv_rtc_set_time(struct device *dev, struct rtc_time *tm)
@@ -84,12 +91,134 @@ static int mv_rtc_read_time(struct device *dev, struct rtc_time *tm)
return rtc_valid_tm(tm);
}
+static int mv_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm)
+{
+ struct rtc_plat_data *pdata = dev_get_drvdata(dev);
+ void __iomem *ioaddr = pdata->ioaddr;
+ u32 rtc_time, rtc_date;
+ unsigned int year, month, day, hour, minute, second, wday;
+
+ rtc_time = readl(ioaddr + RTC_ALARM_TIME_REG_OFFS);
+ rtc_date = readl(ioaddr + RTC_ALARM_DATE_REG_OFFS);
+
+ second = rtc_time & 0x7f;
+ minute = (rtc_time >> RTC_MINUTES_OFFS) & 0x7f;
+ hour = (rtc_time >> RTC_HOURS_OFFS) & 0x3f; /* assume 24 hours mode */
+ wday = (rtc_time >> RTC_WDAY_OFFS) & 0x7;
+
+ day = rtc_date & 0x3f;
+ month = (rtc_date >> RTC_MONTH_OFFS) & 0x3f;
+ year = (rtc_date >> RTC_YEAR_OFFS) & 0xff;
+
+ alm->time.tm_sec = bcd2bin(second);
+ alm->time.tm_min = bcd2bin(minute);
+ alm->time.tm_hour = bcd2bin(hour);
+ alm->time.tm_mday = bcd2bin(day);
+ alm->time.tm_wday = bcd2bin(wday);
+ alm->time.tm_mon = bcd2bin(month) - 1;
+ /* hw counts from year 2000, but tm_year is relative to 1900 */
+ alm->time.tm_year = bcd2bin(year) + 100;
+
+ if (rtc_valid_tm(&alm->time) < 0) {
+ dev_err(dev, "retrieved alarm date/time is not valid.\n");
+ rtc_time_to_tm(0, &alm->time);
+ }
+
+ alm->enabled = !!readl(ioaddr + RTC_ALARM_INTERRUPT_MASK_REG_OFFS);
+ return 0;
+}
+
+static int mv_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
+{
+ struct rtc_plat_data *pdata = dev_get_drvdata(dev);
+ void __iomem *ioaddr = pdata->ioaddr;
+ u32 rtc_reg = 0;
+
+ if (alm->time.tm_sec >= 0)
+ rtc_reg |= (RTC_ALARM_VALID | bin2bcd(alm->time.tm_sec))
+ << RTC_SECONDS_OFFS;
+ if (alm->time.tm_min >= 0)
+ rtc_reg |= (RTC_ALARM_VALID | bin2bcd(alm->time.tm_min))
+ << RTC_MINUTES_OFFS;
+ if (alm->time.tm_hour >= 0)
+ rtc_reg |= (RTC_ALARM_VALID | bin2bcd(alm->time.tm_hour))
+ << RTC_HOURS_OFFS;
+
+ writel(rtc_reg, ioaddr + RTC_ALARM_TIME_REG_OFFS);
+
+ if (alm->time.tm_mday >= 0)
+ rtc_reg = (RTC_ALARM_VALID | bin2bcd(alm->time.tm_mday))
+ << RTC_MDAY_OFFS;
+ else
+ rtc_reg = 0;
+
+ if (alm->time.tm_mon >= 0)
+ rtc_reg |= (RTC_ALARM_VALID | bin2bcd(alm->time.tm_mon + 1))
+ << RTC_MONTH_OFFS;
+
+ if (alm->time.tm_year >= 0)
+ rtc_reg |= (RTC_ALARM_VALID | bin2bcd(alm->time.tm_year % 100))
+ << RTC_YEAR_OFFS;
+
+ writel(rtc_reg, ioaddr + RTC_ALARM_DATE_REG_OFFS);
+ writel(0, ioaddr + RTC_ALARM_INTERRUPT_CASUE_REG_OFFS);
+ writel(alm->enabled ? 1 : 0,
+ ioaddr + RTC_ALARM_INTERRUPT_MASK_REG_OFFS);
+
+ return 0;
+}
+
+static int mv_rtc_ioctl(struct device *dev, unsigned int cmd,
+ unsigned long arg)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
+ void __iomem *ioaddr = pdata->ioaddr;
+
+ if (pdata->irq < 0)
+ return -ENOIOCTLCMD; /* fall back into rtc-dev's emulation */
+ switch (cmd) {
+ case RTC_AIE_OFF:
+ writel(0, ioaddr + RTC_ALARM_INTERRUPT_MASK_REG_OFFS);
+ break;
+ case RTC_AIE_ON:
+ writel(1, ioaddr + RTC_ALARM_INTERRUPT_MASK_REG_OFFS);
+ break;
+ default:
+ return -ENOIOCTLCMD;
+ }
+ return 0;
+}
+
+static irqreturn_t mv_rtc_interrupt(int irq, void *data)
+{
+ struct rtc_plat_data *pdata = data;
+ void __iomem *ioaddr = pdata->ioaddr;
+
+ /* alarm irq? */
+ if (!readl(ioaddr + RTC_ALARM_INTERRUPT_CASUE_REG_OFFS))
+ return IRQ_NONE;
+
+ /* clear interrupt */
+ writel(0, ioaddr + RTC_ALARM_INTERRUPT_CASUE_REG_OFFS);
+ rtc_update_irq(pdata->rtc, 1, RTC_IRQF | RTC_AF);
+ return IRQ_HANDLED;
+}
+
static const struct rtc_class_ops mv_rtc_ops = {
.read_time = mv_rtc_read_time,
.set_time = mv_rtc_set_time,
};
-static int __init mv_rtc_probe(struct platform_device *pdev)
+static const struct rtc_class_ops mv_rtc_alarm_ops = {
+ .read_time = mv_rtc_read_time,
+ .set_time = mv_rtc_set_time,
+ .read_alarm = mv_rtc_read_alarm,
+ .set_alarm = mv_rtc_set_alarm,
+ .ioctl = mv_rtc_ioctl,
+};
+
+static int __devinit mv_rtc_probe(struct platform_device *pdev)
{
struct resource *res;
struct rtc_plat_data *pdata;
@@ -130,12 +259,31 @@ static int __init mv_rtc_probe(struct platform_device *pdev)
}
}
+ pdata->irq = platform_get_irq(pdev, 0);
+
platform_set_drvdata(pdev, pdata);
- pdata->rtc = rtc_device_register(pdev->name, &pdev->dev,
- &mv_rtc_ops, THIS_MODULE);
+
+ if (pdata->irq >= 0) {
+ device_init_wakeup(&pdev->dev, 1);
+ pdata->rtc = rtc_device_register(pdev->name, &pdev->dev,
+ &mv_rtc_alarm_ops,
+ THIS_MODULE);
+ } else
+ pdata->rtc = rtc_device_register(pdev->name, &pdev->dev,
+ &mv_rtc_ops, THIS_MODULE);
if (IS_ERR(pdata->rtc))
return PTR_ERR(pdata->rtc);
+ if (pdata->irq >= 0) {
+ writel(0, pdata->ioaddr + RTC_ALARM_INTERRUPT_MASK_REG_OFFS);
+ if (devm_request_irq(&pdev->dev, pdata->irq, mv_rtc_interrupt,
+ IRQF_DISABLED | IRQF_SHARED,
+ pdev->name, pdata) < 0) {
+ dev_warn(&pdev->dev, "interrupt not available.\n");
+ pdata->irq = -1;
+ }
+ }
+
return 0;
}
@@ -143,6 +291,9 @@ static int __exit mv_rtc_remove(struct platform_device *pdev)
{
struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
+ if (pdata->irq >= 0)
+ device_init_wakeup(&pdev->dev, 0);
+
rtc_device_unregister(pdata->rtc);
return 0;
}
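
With alarm support wired up, rtc-mv can be exercised from userspace through the standard RTC_WKALM_SET ioctl, which lands in mv_rtc_set_alarm(). A rough sketch, assuming /dev/rtc0 and the non-standard timegm() extension for the carry arithmetic:

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/rtc.h>

int main(void)
{
	struct rtc_time now;
	struct rtc_wkalrm alm;
	struct tm tm;
	time_t t;
	int fd = open("/dev/rtc0", O_RDONLY);

	if (fd < 0 || ioctl(fd, RTC_RD_TIME, &now) < 0) {
		perror("rtc");
		return 1;
	}

	/* roll the RTC time forward 60 s with proper carry handling */
	memset(&tm, 0, sizeof(tm));
	tm.tm_sec = now.tm_sec;   tm.tm_min = now.tm_min;  tm.tm_hour = now.tm_hour;
	tm.tm_mday = now.tm_mday; tm.tm_mon = now.tm_mon;  tm.tm_year = now.tm_year;
	t = timegm(&tm) + 60;
	gmtime_r(&t, &tm);

	memset(&alm, 0, sizeof(alm));
	alm.time.tm_sec = tm.tm_sec;   alm.time.tm_min = tm.tm_min;
	alm.time.tm_hour = tm.tm_hour; alm.time.tm_mday = tm.tm_mday;
	alm.time.tm_mon = tm.tm_mon;   alm.time.tm_year = tm.tm_year;
	alm.enabled = 1;

	if (ioctl(fd, RTC_WKALM_SET, &alm) < 0)	/* ends up in mv_rtc_set_alarm() */
		perror("RTC_WKALM_SET");
	close(fd);
	return 0;
}
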
diff --git a/drivers/rtc/rtc-nuc900.c b/drivers/rtc/rtc-nuc900.c
new file mode 100644
index 00000000000..bf59c9c586b
--- /dev/null
+++ b/drivers/rtc/rtc-nuc900.c
@@ -0,0 +1,342 @@
+/*
+ * Copyright (c) 2008-2009 Nuvoton technology corporation.
+ *
+ * Wan ZongShun
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+/* RTC Control Registers */
+#define REG_RTC_INIR 0x00
+#define REG_RTC_AER 0x04
+#define REG_RTC_FCR 0x08
+#define REG_RTC_TLR 0x0C
+#define REG_RTC_CLR 0x10
+#define REG_RTC_TSSR 0x14
+#define REG_RTC_DWR 0x18
+#define REG_RTC_TAR 0x1C
+#define REG_RTC_CAR 0x20
+#define REG_RTC_LIR 0x24
+#define REG_RTC_RIER 0x28
+#define REG_RTC_RIIR 0x2C
+#define REG_RTC_TTR 0x30
+
+#define RTCSET 0x01
+#define AERRWENB 0x10000
+#define INIRRESET 0xa5eb1357
+#define AERPOWERON 0xA965
+#define AERPOWEROFF 0x0000
+#define LEAPYEAR 0x0001
+#define TICKENB 0x80
+#define TICKINTENB 0x0002
+#define ALARMINTENB 0x0001
+#define MODE24 0x0001
+
+struct nuc900_rtc {
+ int irq_num;
+ void __iomem *rtc_reg;
+ struct rtc_device *rtcdev;
+};
+
+struct nuc900_bcd_time {
+ int bcd_sec;
+ int bcd_min;
+ int bcd_hour;
+ int bcd_mday;
+ int bcd_mon;
+ int bcd_year;
+};
+
+static irqreturn_t nuc900_rtc_interrupt(int irq, void *_rtc)
+{
+ struct nuc900_rtc *rtc = _rtc;
+ unsigned long events = 0, rtc_irq;
+
+ rtc_irq = __raw_readl(rtc->rtc_reg + REG_RTC_RIIR);
+
+ if (rtc_irq & ALARMINTENB) {
+ rtc_irq &= ~ALARMINTENB;
+ __raw_writel(rtc_irq, rtc->rtc_reg + REG_RTC_RIIR);
+ events |= RTC_AF | RTC_IRQF;
+ }
+
+ if (rtc_irq & TICKINTENB) {
+ rtc_irq &= ~TICKINTENB;
+ __raw_writel(rtc_irq, rtc->rtc_reg + REG_RTC_RIIR);
+ events |= RTC_UF | RTC_IRQF;
+ }
+
+ rtc_update_irq(rtc->rtcdev, 1, events);
+
+ return IRQ_HANDLED;
+}
+
+static int *check_rtc_access_enable(struct nuc900_rtc *nuc900_rtc)
+{
+ unsigned int i;
+ __raw_writel(INIRRESET, nuc900_rtc->rtc_reg + REG_RTC_INIR);
+
+ mdelay(10);
+
+ __raw_writel(AERPOWERON, nuc900_rtc->rtc_reg + REG_RTC_AER);
+
+ for (i = 0; i < 1000; i++) {
+ if (__raw_readl(nuc900_rtc->rtc_reg + REG_RTC_AER) & AERRWENB)
+ return 0;
+ }
+
+ if ((__raw_readl(nuc900_rtc->rtc_reg + REG_RTC_AER) & AERRWENB) == 0x0)
+ return ERR_PTR(-ENODEV);
+
+ return ERR_PTR(-EPERM);
+}
+
+static void nuc900_rtc_bcd2bin(unsigned int timereg,
+ unsigned int calreg, struct rtc_time *tm)
+{
+ tm->tm_mday = bcd2bin(calreg >> 0);
+ tm->tm_mon = bcd2bin(calreg >> 8);
+ tm->tm_year = bcd2bin(calreg >> 16) + 100;
+
+ tm->tm_sec = bcd2bin(timereg >> 0);
+ tm->tm_min = bcd2bin(timereg >> 8);
+ tm->tm_hour = bcd2bin(timereg >> 16);
+
+ rtc_valid_tm(tm);
+}
+
+static void nuc900_rtc_bin2bcd(struct rtc_time *settm,
+ struct nuc900_bcd_time *gettm)
+{
+ gettm->bcd_mday = bin2bcd(settm->tm_mday) << 0;
+ gettm->bcd_mon = bin2bcd(settm->tm_mon) << 8;
+ gettm->bcd_year = bin2bcd(settm->tm_year - 100) << 16;
+
+ gettm->bcd_sec = bin2bcd(settm->tm_sec) << 0;
+ gettm->bcd_min = bin2bcd(settm->tm_min) << 8;
+ gettm->bcd_hour = bin2bcd(settm->tm_hour) << 16;
+}
+
+static int nuc900_update_irq_enable(struct device *dev, unsigned int enabled)
+{
+ struct nuc900_rtc *rtc = dev_get_drvdata(dev);
+
+ if (enabled)
+ __raw_writel(__raw_readl(rtc->rtc_reg + REG_RTC_RIER)|
+ (TICKINTENB), rtc->rtc_reg + REG_RTC_RIER);
+ else
+ __raw_writel(__raw_readl(rtc->rtc_reg + REG_RTC_RIER)&
+ (~TICKINTENB), rtc->rtc_reg + REG_RTC_RIER);
+
+ return 0;
+}
+
+static int nuc900_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+ struct nuc900_rtc *rtc = dev_get_drvdata(dev);
+
+ if (enabled)
+ __raw_writel(__raw_readl(rtc->rtc_reg + REG_RTC_RIER)|
+ (ALARMINTENB), rtc->rtc_reg + REG_RTC_RIER);
+ else
+ __raw_writel(__raw_readl(rtc->rtc_reg + REG_RTC_RIER)&
+ (~ALARMINTENB), rtc->rtc_reg + REG_RTC_RIER);
+
+ return 0;
+}
+
+static int nuc900_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct nuc900_rtc *rtc = dev_get_drvdata(dev);
+ unsigned int timeval, clrval;
+
+ timeval = __raw_readl(rtc->rtc_reg + REG_RTC_TLR);
+ clrval = __raw_readl(rtc->rtc_reg + REG_RTC_CLR);
+
+ nuc900_rtc_bcd2bin(timeval, clrval, tm);
+
+ return 0;
+}
+
+static int nuc900_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct nuc900_rtc *rtc = dev_get_drvdata(dev);
+ struct nuc900_bcd_time gettm;
+ unsigned long val;
+ int *err;
+
+ nuc900_rtc_bin2bcd(tm, &gettm);
+
+ err = check_rtc_access_enable(rtc);
+ if (IS_ERR(err))
+ return PTR_ERR(err);
+
+ val = gettm.bcd_mday | gettm.bcd_mon | gettm.bcd_year;
+ __raw_writel(val, rtc->rtc_reg + REG_RTC_CLR);
+
+ val = gettm.bcd_sec | gettm.bcd_min | gettm.bcd_hour;
+ __raw_writel(val, rtc->rtc_reg + REG_RTC_TLR);
+
+ return 0;
+}
+
+static int nuc900_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct nuc900_rtc *rtc = dev_get_drvdata(dev);
+ unsigned int timeval, carval;
+
+ timeval = __raw_readl(rtc->rtc_reg + REG_RTC_TAR);
+ carval = __raw_readl(rtc->rtc_reg + REG_RTC_CAR);
+
+ nuc900_rtc_bcd2bin(timeval, carval, &alrm->time);
+
+ return 0;
+}
+
+static int nuc900_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct nuc900_rtc *rtc = dev_get_drvdata(dev);
+ struct nuc900_bcd_time tm;
+ unsigned long val;
+ int *err;
+
+ nuc900_rtc_bin2bcd(&alrm->time, &tm);
+
+ err = check_rtc_access_enable(rtc);
+ if (IS_ERR(err))
+ return PTR_ERR(err);
+
+ val = tm.bcd_mday | tm.bcd_mon | tm.bcd_year;
+ __raw_writel(val, rtc->rtc_reg + REG_RTC_CAR);
+
+ val = tm.bcd_sec | tm.bcd_min | tm.bcd_hour;
+ __raw_writel(val, rtc->rtc_reg + REG_RTC_TAR);
+
+ return 0;
+}
+
+static struct rtc_class_ops nuc900_rtc_ops = {
+ .read_time = nuc900_rtc_read_time,
+ .set_time = nuc900_rtc_set_time,
+ .read_alarm = nuc900_rtc_read_alarm,
+ .set_alarm = nuc900_rtc_set_alarm,
+ .alarm_irq_enable = nuc900_alarm_irq_enable,
+ .update_irq_enable = nuc900_update_irq_enable,
+};
+
+static int __devinit nuc900_rtc_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct nuc900_rtc *nuc900_rtc;
+ int err = 0;
+
+ nuc900_rtc = kzalloc(sizeof(struct nuc900_rtc), GFP_KERNEL);
+ if (!nuc900_rtc) {
+ dev_err(&pdev->dev, "kzalloc nuc900_rtc failed\n");
+ return -ENOMEM;
+ }
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "platform_get_resource failed\n");
+ err = -ENXIO;
+ goto fail1;
+ }
+
+ if (!request_mem_region(res->start, resource_size(res),
+ pdev->name)) {
+ dev_err(&pdev->dev, "request_mem_region failed\n");
+ err = -EBUSY;
+ goto fail1;
+ }
+
+ nuc900_rtc->rtc_reg = ioremap(res->start, resource_size(res));
+ if (!nuc900_rtc->rtc_reg) {
+ dev_err(&pdev->dev, "ioremap rtc_reg failed\n");
+ err = -ENOMEM;
+ goto fail2;
+ }
+
+ nuc900_rtc->irq_num = platform_get_irq(pdev, 0);
+ if (request_irq(nuc900_rtc->irq_num, nuc900_rtc_interrupt,
+ IRQF_DISABLED, "nuc900rtc", nuc900_rtc)) {
+ dev_err(&pdev->dev, "NUC900 RTC request irq failed\n");
+ err = -EBUSY;
+ goto fail3;
+ }
+
+ nuc900_rtc->rtcdev = rtc_device_register(pdev->name, &pdev->dev,
+ &nuc900_rtc_ops, THIS_MODULE);
+ if (IS_ERR(nuc900_rtc->rtcdev)) {
+ dev_err(&pdev->dev, "rtc device register faild\n");
+ err = PTR_ERR(nuc900_rtc->rtcdev);
+ goto fail4;
+ }
+
+ platform_set_drvdata(pdev, nuc900_rtc);
+ __raw_writel(__raw_readl(nuc900_rtc->rtc_reg + REG_RTC_TSSR) | MODE24,
+ nuc900_rtc->rtc_reg + REG_RTC_TSSR);
+
+ return 0;
+
+fail4: free_irq(nuc900_rtc->irq_num, nuc900_rtc);
+fail3: iounmap(nuc900_rtc->rtc_reg);
+fail2: release_mem_region(res->start, resource_size(res));
+fail1: kfree(nuc900_rtc);
+ return err;
+}
+
+static int __devexit nuc900_rtc_remove(struct platform_device *pdev)
+{
+ struct nuc900_rtc *nuc900_rtc = platform_get_drvdata(pdev);
+ struct resource *res;
+
+ rtc_device_unregister(nuc900_rtc->rtcdev);
+ free_irq(nuc900_rtc->irq_num, nuc900_rtc);
+ iounmap(nuc900_rtc->rtc_reg);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(res->start, resource_size(res));
+
+ kfree(nuc900_rtc);
+
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static struct platform_driver nuc900_rtc_driver = {
+ .remove = __devexit_p(nuc900_rtc_remove),
+ .driver = {
+ .name = "nuc900-rtc",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init nuc900_rtc_init(void)
+{
+ return platform_driver_probe(&nuc900_rtc_driver, nuc900_rtc_probe);
+}
+
+static void __exit nuc900_rtc_exit(void)
+{
+ platform_driver_unregister(&nuc900_rtc_driver);
+}
+
+module_init(nuc900_rtc_init);
+module_exit(nuc900_rtc_exit);
+
+MODULE_AUTHOR("Wan ZongShun ");
+MODULE_DESCRIPTION("nuc910/nuc920 RTC driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:nuc900-rtc");
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
index 0587d53987f..64d9727b722 100644
--- a/drivers/rtc/rtc-omap.c
+++ b/drivers/rtc/rtc-omap.c
@@ -87,9 +87,10 @@
#define OMAP_RTC_INTERRUPTS_IT_ALARM (1<<3)
#define OMAP_RTC_INTERRUPTS_IT_TIMER (1<<2)
+static void __iomem *rtc_base;
-#define rtc_read(addr) omap_readb(OMAP_RTC_BASE + (addr))
-#define rtc_write(val, addr) omap_writeb(val, OMAP_RTC_BASE + (addr))
+#define rtc_read(addr) __raw_readb(rtc_base + (addr))
+#define rtc_write(val, addr) __raw_writeb(val, rtc_base + (addr))
/* we rely on the rtc framework to handle locking (rtc->ops_lock),
@@ -330,32 +331,31 @@ static int __init omap_rtc_probe(struct platform_device *pdev)
return -ENOENT;
}
- /* NOTE: using static mapping for RTC registers */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res && res->start != OMAP_RTC_BASE) {
- pr_debug("%s: RTC registers at %08x, expected %08x\n",
- pdev->name, (unsigned) res->start, OMAP_RTC_BASE);
+ if (!res) {
+ pr_debug("%s: RTC resource data missing\n", pdev->name);
return -ENOENT;
}
- if (res)
- mem = request_mem_region(res->start,
- res->end - res->start + 1,
- pdev->name);
- else
- mem = NULL;
+ mem = request_mem_region(res->start, resource_size(res), pdev->name);
if (!mem) {
pr_debug("%s: RTC registers at %08x are not free\n",
- pdev->name, OMAP_RTC_BASE);
+ pdev->name, res->start);
return -EBUSY;
}
+ rtc_base = ioremap(res->start, resource_size(res));
+ if (!rtc_base) {
+ pr_debug("%s: RTC registers can't be mapped\n", pdev->name);
+ goto fail;
+ }
+
rtc = rtc_device_register(pdev->name, &pdev->dev,
&omap_rtc_ops, THIS_MODULE);
if (IS_ERR(rtc)) {
pr_debug("%s: can't register RTC device, err %ld\n",
pdev->name, PTR_ERR(rtc));
- goto fail;
+ goto fail0;
}
platform_set_drvdata(pdev, rtc);
dev_set_drvdata(&rtc->dev, mem);
@@ -380,13 +380,14 @@ static int __init omap_rtc_probe(struct platform_device *pdev)
dev_name(&rtc->dev), rtc)) {
pr_debug("%s: RTC timer interrupt IRQ%d already claimed\n",
pdev->name, omap_rtc_timer);
- goto fail0;
+ goto fail1;
}
- if (request_irq(omap_rtc_alarm, rtc_irq, IRQF_DISABLED,
- dev_name(&rtc->dev), rtc)) {
+ if ((omap_rtc_timer != omap_rtc_alarm) &&
+ (request_irq(omap_rtc_alarm, rtc_irq, IRQF_DISABLED,
+ dev_name(&rtc->dev), rtc))) {
pr_debug("%s: RTC alarm interrupt IRQ%d already claimed\n",
pdev->name, omap_rtc_alarm);
- goto fail1;
+ goto fail2;
}
/* On boards with split power, RTC_ON_NOFF won't reset the RTC */
@@ -419,10 +420,12 @@ static int __init omap_rtc_probe(struct platform_device *pdev)
return 0;
-fail1:
+fail2:
free_irq(omap_rtc_timer, NULL);
-fail0:
+fail1:
rtc_device_unregister(rtc);
+fail0:
+ iounmap(rtc_base);
fail:
release_resource(mem);
return -EIO;
@@ -438,7 +441,9 @@ static int __exit omap_rtc_remove(struct platform_device *pdev)
rtc_write(0, OMAP_RTC_INTERRUPTS_REG);
free_irq(omap_rtc_timer, rtc);
- free_irq(omap_rtc_alarm, rtc);
+
+ if (omap_rtc_timer != omap_rtc_alarm)
+ free_irq(omap_rtc_alarm, rtc);
release_resource(dev_get_drvdata(&rtc->dev));
rtc_device_unregister(rtc);
diff --git a/drivers/rtc/rtc-pcf50633.c b/drivers/rtc/rtc-pcf50633.c
index 9b74e9c9151..854c3cb365a 100644
--- a/drivers/rtc/rtc-pcf50633.c
+++ b/drivers/rtc/rtc-pcf50633.c
@@ -58,6 +58,7 @@ struct pcf50633_time {
struct pcf50633_rtc {
int alarm_enabled;
int second_enabled;
+ int alarm_pending;
struct pcf50633 *pcf;
struct rtc_device *rtc_dev;
@@ -209,6 +210,7 @@ static int pcf50633_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
rtc = dev_get_drvdata(dev);
alrm->enabled = rtc->alarm_enabled;
+ alrm->pending = rtc->alarm_pending;
ret = pcf50633_read_block(rtc->pcf, PCF50633_REG_RTCSCA,
PCF50633_TI_EXTENT, &pcf_tm.time[0]);
@@ -244,6 +246,8 @@ static int pcf50633_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
/* Returns 0 on success */
ret = pcf50633_write_block(rtc->pcf, PCF50633_REG_RTCSCA,
PCF50633_TI_EXTENT, &pcf_tm.time[0]);
+ if (!alrm->enabled)
+ rtc->alarm_pending = 0;
if (!alarm_masked || alrm->enabled)
pcf50633_irq_unmask(rtc->pcf, PCF50633_IRQ_ALARM);
@@ -268,6 +272,7 @@ static void pcf50633_rtc_irq(int irq, void *data)
switch (irq) {
case PCF50633_IRQ_ALARM:
rtc_update_irq(rtc->rtc_dev, 1, RTC_AF | RTC_IRQF);
+ rtc->alarm_pending = 1;
break;
case PCF50633_IRQ_SECOND:
rtc_update_irq(rtc->rtc_dev, 1, RTC_UF | RTC_IRQF);
diff --git a/drivers/rtc/rtc-pcf8563.c b/drivers/rtc/rtc-pcf8563.c
index b725913ccbe..65f346b2fba 100644
--- a/drivers/rtc/rtc-pcf8563.c
+++ b/drivers/rtc/rtc-pcf8563.c
@@ -212,6 +212,8 @@ static int pcf8563_probe(struct i2c_client *client,
dev_info(&client->dev, "chip found, driver version " DRV_VERSION "\n");
+ i2c_set_clientdata(client, pcf8563);
+
pcf8563->rtc = rtc_device_register(pcf8563_driver.driver.name,
&client->dev, &pcf8563_rtc_ops, THIS_MODULE);
@@ -220,8 +222,6 @@ static int pcf8563_probe(struct i2c_client *client,
goto exit_kfree;
}
- i2c_set_clientdata(client, pcf8563);
-
return 0;
exit_kfree:
diff --git a/drivers/rtc/rtc-pcf8583.c b/drivers/rtc/rtc-pcf8583.c
index 7d33cda3f8f..2d201afead3 100644
--- a/drivers/rtc/rtc-pcf8583.c
+++ b/drivers/rtc/rtc-pcf8583.c
@@ -277,6 +277,8 @@ static int pcf8583_probe(struct i2c_client *client,
if (!pcf8583)
return -ENOMEM;
+ i2c_set_clientdata(client, pcf8583);
+
pcf8583->rtc = rtc_device_register(pcf8583_driver.driver.name,
&client->dev, &pcf8583_rtc_ops, THIS_MODULE);
@@ -285,7 +287,6 @@ static int pcf8583_probe(struct i2c_client *client,
goto exit_kfree;
}
- i2c_set_clientdata(client, pcf8583);
return 0;
exit_kfree:
diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c
index f41873f98f6..0264b117893 100644
--- a/drivers/rtc/rtc-pl031.c
+++ b/drivers/rtc/rtc-pl031.c
@@ -51,10 +51,10 @@ static int pl031_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
switch (cmd) {
case RTC_AIE_OFF:
- __raw_writel(1, ldata->base + RTC_MIS);
+ writel(1, ldata->base + RTC_MIS);
return 0;
case RTC_AIE_ON:
- __raw_writel(0, ldata->base + RTC_MIS);
+ writel(0, ldata->base + RTC_MIS);
return 0;
}
@@ -65,7 +65,7 @@ static int pl031_read_time(struct device *dev, struct rtc_time *tm)
{
struct pl031_local *ldata = dev_get_drvdata(dev);
- rtc_time_to_tm(__raw_readl(ldata->base + RTC_DR), tm);
+ rtc_time_to_tm(readl(ldata->base + RTC_DR), tm);
return 0;
}
@@ -76,7 +76,7 @@ static int pl031_set_time(struct device *dev, struct rtc_time *tm)
struct pl031_local *ldata = dev_get_drvdata(dev);
rtc_tm_to_time(tm, &time);
- __raw_writel(time, ldata->base + RTC_LR);
+ writel(time, ldata->base + RTC_LR);
return 0;
}
@@ -85,9 +85,9 @@ static int pl031_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
{
struct pl031_local *ldata = dev_get_drvdata(dev);
- rtc_time_to_tm(__raw_readl(ldata->base + RTC_MR), &alarm->time);
- alarm->pending = __raw_readl(ldata->base + RTC_RIS);
- alarm->enabled = __raw_readl(ldata->base + RTC_IMSC);
+ rtc_time_to_tm(readl(ldata->base + RTC_MR), &alarm->time);
+ alarm->pending = readl(ldata->base + RTC_RIS);
+ alarm->enabled = readl(ldata->base + RTC_IMSC);
return 0;
}
@@ -99,8 +99,8 @@ static int pl031_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
rtc_tm_to_time(&alarm->time, &time);
- __raw_writel(time, ldata->base + RTC_MR);
- __raw_writel(!alarm->enabled, ldata->base + RTC_MIS);
+ writel(time, ldata->base + RTC_MR);
+ writel(!alarm->enabled, ldata->base + RTC_MIS);
return 0;
}
@@ -180,8 +180,9 @@ err_req:
static struct amba_id pl031_ids[] __initdata = {
{
- .id = 0x00041031,
- .mask = 0x000fffff, },
+ .id = 0x00041031,
+ .mask = 0x000fffff,
+ },
{0, 0},
};
diff --git a/drivers/rtc/rtc-stk17ta8.c b/drivers/rtc/rtc-stk17ta8.c
index d491eb265c3..67700831b5c 100644
--- a/drivers/rtc/rtc-stk17ta8.c
+++ b/drivers/rtc/rtc-stk17ta8.c
@@ -62,7 +62,6 @@
struct rtc_plat_data {
struct rtc_device *rtc;
void __iomem *ioaddr;
- unsigned long baseaddr;
unsigned long last_jiffies;
int irq;
unsigned int irqen;
@@ -70,6 +69,7 @@ struct rtc_plat_data {
int alrm_min;
int alrm_hour;
int alrm_mday;
+ spinlock_t lock;
};
static int stk17ta8_rtc_set_time(struct device *dev, struct rtc_time *tm)
@@ -142,7 +142,7 @@ static void stk17ta8_rtc_update_alarm(struct rtc_plat_data *pdata)
unsigned long irqflags;
u8 flags;
- spin_lock_irqsave(&pdata->rtc->irq_lock, irqflags);
+ spin_lock_irqsave(&pdata->lock, irqflags);
flags = readb(ioaddr + RTC_FLAGS);
writeb(flags | RTC_WRITE, ioaddr + RTC_FLAGS);
@@ -162,7 +162,7 @@ static void stk17ta8_rtc_update_alarm(struct rtc_plat_data *pdata)
writeb(pdata->irqen ? RTC_INTS_AIE : 0, ioaddr + RTC_INTERRUPTS);
readb(ioaddr + RTC_FLAGS); /* clear interrupts */
writeb(flags & ~RTC_WRITE, ioaddr + RTC_FLAGS);
- spin_unlock_irqrestore(&pdata->rtc->irq_lock, irqflags);
+ spin_unlock_irqrestore(&pdata->lock, irqflags);
}
static int stk17ta8_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
@@ -202,56 +202,53 @@ static irqreturn_t stk17ta8_rtc_interrupt(int irq, void *dev_id)
struct platform_device *pdev = dev_id;
struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
void __iomem *ioaddr = pdata->ioaddr;
- unsigned long events = RTC_IRQF;
+ unsigned long events = 0;
+ spin_lock(&pdata->lock);
/* read and clear interrupt */
- if (!(readb(ioaddr + RTC_FLAGS) & RTC_FLAGS_AF))
- return IRQ_NONE;
- if (readb(ioaddr + RTC_SECONDS_ALARM) & 0x80)
- events |= RTC_UF;
- else
- events |= RTC_AF;
- rtc_update_irq(pdata->rtc, 1, events);
- return IRQ_HANDLED;
+ if (readb(ioaddr + RTC_FLAGS) & RTC_FLAGS_AF) {
+ events = RTC_IRQF;
+ if (readb(ioaddr + RTC_SECONDS_ALARM) & 0x80)
+ events |= RTC_UF;
+ else
+ events |= RTC_AF;
+ if (likely(pdata->rtc))
+ rtc_update_irq(pdata->rtc, 1, events);
+ }
+ spin_unlock(&pdata->lock);
+ return events ? IRQ_HANDLED : IRQ_NONE;
}
-static int stk17ta8_rtc_ioctl(struct device *dev, unsigned int cmd,
- unsigned long arg)
+static int stk17ta8_rtc_alarm_irq_enable(struct device *dev,
+ unsigned int enabled)
{
struct platform_device *pdev = to_platform_device(dev);
struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
if (pdata->irq <= 0)
- return -ENOIOCTLCMD; /* fall back into rtc-dev's emulation */
- switch (cmd) {
- case RTC_AIE_OFF:
- pdata->irqen &= ~RTC_AF;
- stk17ta8_rtc_update_alarm(pdata);
- break;
- case RTC_AIE_ON:
+ return -EINVAL;
+ if (enabled)
pdata->irqen |= RTC_AF;
- stk17ta8_rtc_update_alarm(pdata);
- break;
- default:
- return -ENOIOCTLCMD;
- }
+ else
+ pdata->irqen &= ~RTC_AF;
+ stk17ta8_rtc_update_alarm(pdata);
return 0;
}
static const struct rtc_class_ops stk17ta8_rtc_ops = {
- .read_time = stk17ta8_rtc_read_time,
- .set_time = stk17ta8_rtc_set_time,
- .read_alarm = stk17ta8_rtc_read_alarm,
- .set_alarm = stk17ta8_rtc_set_alarm,
- .ioctl = stk17ta8_rtc_ioctl,
+ .read_time = stk17ta8_rtc_read_time,
+ .set_time = stk17ta8_rtc_set_time,
+ .read_alarm = stk17ta8_rtc_read_alarm,
+ .set_alarm = stk17ta8_rtc_set_alarm,
+ .alarm_irq_enable = stk17ta8_rtc_alarm_irq_enable,
};
static ssize_t stk17ta8_nvram_read(struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t pos, size_t size)
{
- struct platform_device *pdev =
- to_platform_device(container_of(kobj, struct device, kobj));
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct platform_device *pdev = to_platform_device(dev);
struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
void __iomem *ioaddr = pdata->ioaddr;
ssize_t count;
@@ -265,8 +262,8 @@ static ssize_t stk17ta8_nvram_write(struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t pos, size_t size)
{
- struct platform_device *pdev =
- to_platform_device(container_of(kobj, struct device, kobj));
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct platform_device *pdev = to_platform_device(dev);
struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
void __iomem *ioaddr = pdata->ioaddr;
ssize_t count;
@@ -288,31 +285,26 @@ static struct bin_attribute stk17ta8_nvram_attr = {
static int __devinit stk17ta8_rtc_probe(struct platform_device *pdev)
{
- struct rtc_device *rtc;
struct resource *res;
unsigned int cal;
unsigned int flags;
struct rtc_plat_data *pdata;
- void __iomem *ioaddr = NULL;
+ void __iomem *ioaddr;
int ret = 0;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENODEV;
- pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
- if (!request_mem_region(res->start, RTC_REG_SIZE, pdev->name)) {
- ret = -EBUSY;
- goto out;
- }
- pdata->baseaddr = res->start;
- ioaddr = ioremap(pdata->baseaddr, RTC_REG_SIZE);
- if (!ioaddr) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!devm_request_mem_region(&pdev->dev, res->start, RTC_REG_SIZE,
+ pdev->name))
+ return -EBUSY;
+ ioaddr = devm_ioremap(&pdev->dev, res->start, RTC_REG_SIZE);
+ if (!ioaddr)
+ return -ENOMEM;
pdata->ioaddr = ioaddr;
pdata->irq = platform_get_irq(pdev, 0);
@@ -328,9 +320,13 @@ static int __devinit stk17ta8_rtc_probe(struct platform_device *pdev)
if (readb(ioaddr + RTC_FLAGS) & RTC_FLAGS_PF)
dev_warn(&pdev->dev, "voltage-low detected.\n");
+ spin_lock_init(&pdata->lock);
+ pdata->last_jiffies = jiffies;
+ platform_set_drvdata(pdev, pdata);
if (pdata->irq > 0) {
writeb(0, ioaddr + RTC_INTERRUPTS);
- if (request_irq(pdata->irq, stk17ta8_rtc_interrupt,
+ if (devm_request_irq(&pdev->dev, pdata->irq,
+ stk17ta8_rtc_interrupt,
IRQF_DISABLED | IRQF_SHARED,
pdev->name, pdev) < 0) {
dev_warn(&pdev->dev, "interrupt not available.\n");
@@ -338,29 +334,14 @@ static int __devinit stk17ta8_rtc_probe(struct platform_device *pdev)
}
}
- rtc = rtc_device_register(pdev->name, &pdev->dev,
+ pdata->rtc = rtc_device_register(pdev->name, &pdev->dev,
&stk17ta8_rtc_ops, THIS_MODULE);
- if (IS_ERR(rtc)) {
- ret = PTR_ERR(rtc);
- goto out;
- }
- pdata->rtc = rtc;
- pdata->last_jiffies = jiffies;
- platform_set_drvdata(pdev, pdata);
+ if (IS_ERR(pdata->rtc))
+ return PTR_ERR(pdata->rtc);
+
ret = sysfs_create_bin_file(&pdev->dev.kobj, &stk17ta8_nvram_attr);
if (ret)
- goto out;
- return 0;
- out:
- if (pdata->rtc)
rtc_device_unregister(pdata->rtc);
- if (pdata->irq > 0)
- free_irq(pdata->irq, pdev);
- if (ioaddr)
- iounmap(ioaddr);
- if (pdata->baseaddr)
- release_mem_region(pdata->baseaddr, RTC_REG_SIZE);
- kfree(pdata);
return ret;
}
@@ -370,13 +351,8 @@ static int __devexit stk17ta8_rtc_remove(struct platform_device *pdev)
sysfs_remove_bin_file(&pdev->dev.kobj, &stk17ta8_nvram_attr);
rtc_device_unregister(pdata->rtc);
- if (pdata->irq > 0) {
+ if (pdata->irq > 0)
writeb(0, pdata->ioaddr + RTC_INTERRUPTS);
- free_irq(pdata->irq, pdev);
- }
- iounmap(pdata->ioaddr);
- release_mem_region(pdata->baseaddr, RTC_REG_SIZE);
- kfree(pdata);
return 0;
}
diff --git a/drivers/rtc/rtc-tx4939.c b/drivers/rtc/rtc-tx4939.c
index 4a6ed1104fb..9ee81d8aa7c 100644
--- a/drivers/rtc/rtc-tx4939.c
+++ b/drivers/rtc/rtc-tx4939.c
@@ -17,6 +17,7 @@
struct tx4939rtc_plat_data {
struct rtc_device *rtc;
struct tx4939_rtc_reg __iomem *rtcreg;
+ spinlock_t lock;
};
static struct tx4939rtc_plat_data *get_tx4939rtc_plat_data(struct device *dev)
@@ -52,14 +53,14 @@ static int tx4939_rtc_set_mmss(struct device *dev, unsigned long secs)
buf[3] = secs >> 8;
buf[4] = secs >> 16;
buf[5] = secs >> 24;
- spin_lock_irq(&pdata->rtc->irq_lock);
+ spin_lock_irq(&pdata->lock);
__raw_writel(0, &rtcreg->adr);
for (i = 0; i < 6; i++)
__raw_writel(buf[i], &rtcreg->dat);
ret = tx4939_rtc_cmd(rtcreg,
TX4939_RTCCTL_COMMAND_SETTIME |
(__raw_readl(&rtcreg->ctl) & TX4939_RTCCTL_ALME));
- spin_unlock_irq(&pdata->rtc->irq_lock);
+ spin_unlock_irq(&pdata->lock);
return ret;
}
@@ -71,18 +72,18 @@ static int tx4939_rtc_read_time(struct device *dev, struct rtc_time *tm)
unsigned long sec;
unsigned char buf[6];
- spin_lock_irq(&pdata->rtc->irq_lock);
+ spin_lock_irq(&pdata->lock);
ret = tx4939_rtc_cmd(rtcreg,
TX4939_RTCCTL_COMMAND_GETTIME |
(__raw_readl(&rtcreg->ctl) & TX4939_RTCCTL_ALME));
if (ret) {
- spin_unlock_irq(&pdata->rtc->irq_lock);
+ spin_unlock_irq(&pdata->lock);
return ret;
}
__raw_writel(2, &rtcreg->adr);
for (i = 2; i < 6; i++)
buf[i] = __raw_readl(&rtcreg->dat);
- spin_unlock_irq(&pdata->rtc->irq_lock);
+ spin_unlock_irq(&pdata->lock);
sec = (buf[5] << 24) | (buf[4] << 16) | (buf[3] << 8) | buf[2];
rtc_time_to_tm(sec, tm);
return rtc_valid_tm(tm);
@@ -110,13 +111,13 @@ static int tx4939_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
buf[3] = sec >> 8;
buf[4] = sec >> 16;
buf[5] = sec >> 24;
- spin_lock_irq(&pdata->rtc->irq_lock);
+ spin_lock_irq(&pdata->lock);
__raw_writel(0, &rtcreg->adr);
for (i = 0; i < 6; i++)
__raw_writel(buf[i], &rtcreg->dat);
ret = tx4939_rtc_cmd(rtcreg, TX4939_RTCCTL_COMMAND_SETALARM |
(alrm->enabled ? TX4939_RTCCTL_ALME : 0));
- spin_unlock_irq(&pdata->rtc->irq_lock);
+ spin_unlock_irq(&pdata->lock);
return ret;
}
@@ -129,12 +130,12 @@ static int tx4939_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
unsigned char buf[6];
u32 ctl;
- spin_lock_irq(&pdata->rtc->irq_lock);
+ spin_lock_irq(&pdata->lock);
ret = tx4939_rtc_cmd(rtcreg,
TX4939_RTCCTL_COMMAND_GETALARM |
(__raw_readl(&rtcreg->ctl) & TX4939_RTCCTL_ALME));
if (ret) {
- spin_unlock_irq(&pdata->rtc->irq_lock);
+ spin_unlock_irq(&pdata->lock);
return ret;
}
__raw_writel(2, &rtcreg->adr);
@@ -143,7 +144,7 @@ static int tx4939_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
ctl = __raw_readl(&rtcreg->ctl);
alrm->enabled = (ctl & TX4939_RTCCTL_ALME) ? 1 : 0;
alrm->pending = (ctl & TX4939_RTCCTL_ALMD) ? 1 : 0;
- spin_unlock_irq(&pdata->rtc->irq_lock);
+ spin_unlock_irq(&pdata->lock);
sec = (buf[5] << 24) | (buf[4] << 16) | (buf[3] << 8) | buf[2];
rtc_time_to_tm(sec, &alrm->time);
return rtc_valid_tm(&alrm->time);
@@ -153,11 +154,11 @@ static int tx4939_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
struct tx4939rtc_plat_data *pdata = get_tx4939rtc_plat_data(dev);
- spin_lock_irq(&pdata->rtc->irq_lock);
+ spin_lock_irq(&pdata->lock);
tx4939_rtc_cmd(pdata->rtcreg,
TX4939_RTCCTL_COMMAND_NOP |
(enabled ? TX4939_RTCCTL_ALME : 0));
- spin_unlock_irq(&pdata->rtc->irq_lock);
+ spin_unlock_irq(&pdata->lock);
return 0;
}
@@ -167,13 +168,14 @@ static irqreturn_t tx4939_rtc_interrupt(int irq, void *dev_id)
struct tx4939_rtc_reg __iomem *rtcreg = pdata->rtcreg;
unsigned long events = RTC_IRQF;
- spin_lock(&pdata->rtc->irq_lock);
+ spin_lock(&pdata->lock);
if (__raw_readl(&rtcreg->ctl) & TX4939_RTCCTL_ALMD) {
events |= RTC_AF;
tx4939_rtc_cmd(rtcreg, TX4939_RTCCTL_COMMAND_NOP);
}
- spin_unlock(&pdata->rtc->irq_lock);
- rtc_update_irq(pdata->rtc, 1, events);
+ spin_unlock(&pdata->lock);
+ if (likely(pdata->rtc))
+ rtc_update_irq(pdata->rtc, 1, events);
return IRQ_HANDLED;
}
@@ -194,13 +196,13 @@ static ssize_t tx4939_rtc_nvram_read(struct kobject *kobj,
struct tx4939_rtc_reg __iomem *rtcreg = pdata->rtcreg;
ssize_t count;
- spin_lock_irq(&pdata->rtc->irq_lock);
+ spin_lock_irq(&pdata->lock);
for (count = 0; size > 0 && pos < TX4939_RTC_REG_RAMSIZE;
count++, size--) {
__raw_writel(pos++, &rtcreg->adr);
*buf++ = __raw_readl(&rtcreg->dat);
}
- spin_unlock_irq(&pdata->rtc->irq_lock);
+ spin_unlock_irq(&pdata->lock);
return count;
}
@@ -213,13 +215,13 @@ static ssize_t tx4939_rtc_nvram_write(struct kobject *kobj,
struct tx4939_rtc_reg __iomem *rtcreg = pdata->rtcreg;
ssize_t count;
- spin_lock_irq(&pdata->rtc->irq_lock);
+ spin_lock_irq(&pdata->lock);
for (count = 0; size > 0 && pos < TX4939_RTC_REG_RAMSIZE;
count++, size--) {
__raw_writel(pos++, &rtcreg->adr);
__raw_writel(*buf++, &rtcreg->dat);
}
- spin_unlock_irq(&pdata->rtc->irq_lock);
+ spin_unlock_irq(&pdata->lock);
return count;
}
@@ -259,6 +261,7 @@ static int __init tx4939_rtc_probe(struct platform_device *pdev)
if (!pdata->rtcreg)
return -EBUSY;
+ spin_lock_init(&pdata->lock);
tx4939_rtc_cmd(pdata->rtcreg, TX4939_RTCCTL_COMMAND_NOP);
if (devm_request_irq(&pdev->dev, irq, tx4939_rtc_interrupt,
IRQF_DISABLED, pdev->name, &pdev->dev) < 0)
@@ -277,14 +280,12 @@ static int __init tx4939_rtc_probe(struct platform_device *pdev)
static int __exit tx4939_rtc_remove(struct platform_device *pdev)
{
struct tx4939rtc_plat_data *pdata = platform_get_drvdata(pdev);
- struct rtc_device *rtc = pdata->rtc;
- spin_lock_irq(&rtc->irq_lock);
- tx4939_rtc_cmd(pdata->rtcreg, TX4939_RTCCTL_COMMAND_NOP);
- spin_unlock_irq(&rtc->irq_lock);
sysfs_remove_bin_file(&pdev->dev.kobj, &tx4939_rtc_nvram_attr);
- rtc_device_unregister(rtc);
- platform_set_drvdata(pdev, NULL);
+ rtc_device_unregister(pdata->rtc);
+ spin_lock_irq(&pdata->lock);
+ tx4939_rtc_cmd(pdata->rtcreg, TX4939_RTCCTL_COMMAND_NOP);
+ spin_unlock_irq(&pdata->lock);
return 0;
}
diff --git a/drivers/rtc/rtc-v3020.c b/drivers/rtc/rtc-v3020.c
index ad741afd47d..bed4cab0704 100644
--- a/drivers/rtc/rtc-v3020.c
+++ b/drivers/rtc/rtc-v3020.c
@@ -304,7 +304,6 @@ static int rtc_probe(struct platform_device *pdev)
{
struct v3020_platform_data *pdata = pdev->dev.platform_data;
struct v3020 *chip;
- struct rtc_device *rtc;
int retval = -EBUSY;
int i;
int temp;
@@ -353,13 +352,12 @@ static int rtc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, chip);
- rtc = rtc_device_register("v3020",
+ chip->rtc = rtc_device_register("v3020",
&pdev->dev, &v3020_rtc_ops, THIS_MODULE);
- if (IS_ERR(rtc)) {
- retval = PTR_ERR(rtc);
+ if (IS_ERR(chip->rtc)) {
+ retval = PTR_ERR(chip->rtc);
goto err_io;
}
- chip->rtc = rtc;
return 0;
diff --git a/drivers/rtc/rtc-vr41xx.c b/drivers/rtc/rtc-vr41xx.c
index fadddac1e5a..c3244244e8c 100644
--- a/drivers/rtc/rtc-vr41xx.c
+++ b/drivers/rtc/rtc-vr41xx.c
@@ -327,7 +327,7 @@ static int __devinit rtc_probe(struct platform_device *pdev)
if (!res)
return -EBUSY;
- rtc1_base = ioremap(res->start, res->end - res->start + 1);
+ rtc1_base = ioremap(res->start, resource_size(res));
if (!rtc1_base)
return -EBUSY;
@@ -337,7 +337,7 @@ static int __devinit rtc_probe(struct platform_device *pdev)
goto err_rtc1_iounmap;
}
- rtc2_base = ioremap(res->start, res->end - res->start + 1);
+ rtc2_base = ioremap(res->start, resource_size(res));
if (!rtc2_base) {
retval = -EBUSY;
goto err_rtc1_iounmap;
diff --git a/drivers/rtc/rtc-wm8350.c b/drivers/rtc/rtc-wm8350.c
index f16486635a8..f1e440521c5 100644
--- a/drivers/rtc/rtc-wm8350.c
+++ b/drivers/rtc/rtc-wm8350.c
@@ -354,8 +354,9 @@ static const struct rtc_class_ops wm8350_rtc_ops = {
};
#ifdef CONFIG_PM
-static int wm8350_rtc_suspend(struct platform_device *pdev, pm_message_t state)
+static int wm8350_rtc_suspend(struct device *dev)
{
+ struct platform_device *pdev = to_platform_device(dev);
struct wm8350 *wm8350 = dev_get_drvdata(&pdev->dev);
int ret = 0;
u16 reg;
@@ -373,8 +374,9 @@ static int wm8350_rtc_suspend(struct platform_device *pdev, pm_message_t state)
return ret;
}
-static int wm8350_rtc_resume(struct platform_device *pdev)
+static int wm8350_rtc_resume(struct device *dev)
{
+ struct platform_device *pdev = to_platform_device(dev);
struct wm8350 *wm8350 = dev_get_drvdata(&pdev->dev);
int ret;
@@ -484,13 +486,17 @@ static int __devexit wm8350_rtc_remove(struct platform_device *pdev)
return 0;
}
+static struct dev_pm_ops wm8350_rtc_pm_ops = {
+ .suspend = wm8350_rtc_suspend,
+ .resume = wm8350_rtc_resume,
+};
+
static struct platform_driver wm8350_rtc_driver = {
.probe = wm8350_rtc_probe,
.remove = __devexit_p(wm8350_rtc_remove),
- .suspend = wm8350_rtc_suspend,
- .resume = wm8350_rtc_resume,
.driver = {
.name = "wm8350-rtc",
+ .pm = &wm8350_rtc_pm_ops,
},
};
diff --git a/drivers/rtc/rtc-x1205.c b/drivers/rtc/rtc-x1205.c
index 6583c1a8b07..9aae49139a0 100644
--- a/drivers/rtc/rtc-x1205.c
+++ b/drivers/rtc/rtc-x1205.c
@@ -155,11 +155,11 @@ static int x1205_get_status(struct i2c_client *client, unsigned char *sr)
}
static int x1205_set_datetime(struct i2c_client *client, struct rtc_time *tm,
- int datetoo, u8 reg_base, unsigned char alm_enable)
+ u8 reg_base, unsigned char alm_enable)
{
- int i, xfer, nbytes;
- unsigned char buf[8];
+ int i, xfer;
unsigned char rdata[10] = { 0, reg_base };
+ unsigned char *buf = rdata + 2;
static const unsigned char wel[3] = { 0, X1205_REG_SR,
X1205_SR_WEL };
@@ -170,9 +170,9 @@ static int x1205_set_datetime(struct i2c_client *client, struct rtc_time *tm,
static const unsigned char diswe[3] = { 0, X1205_REG_SR, 0 };
dev_dbg(&client->dev,
- "%s: secs=%d, mins=%d, hours=%d\n",
- __func__,
- tm->tm_sec, tm->tm_min, tm->tm_hour);
+ "%s: sec=%d min=%d hour=%d mday=%d mon=%d year=%d wday=%d\n",
+ __func__, tm->tm_sec, tm->tm_min, tm->tm_hour, tm->tm_mday,
+ tm->tm_mon, tm->tm_year, tm->tm_wday);
buf[CCR_SEC] = bin2bcd(tm->tm_sec);
buf[CCR_MIN] = bin2bcd(tm->tm_min);
@@ -180,23 +180,15 @@ static int x1205_set_datetime(struct i2c_client *client, struct rtc_time *tm,
/* set hour and 24hr bit */
buf[CCR_HOUR] = bin2bcd(tm->tm_hour) | X1205_HR_MIL;
- /* should we also set the date? */
- if (datetoo) {
- dev_dbg(&client->dev,
- "%s: mday=%d, mon=%d, year=%d, wday=%d\n",
- __func__,
- tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday);
+ buf[CCR_MDAY] = bin2bcd(tm->tm_mday);
- buf[CCR_MDAY] = bin2bcd(tm->tm_mday);
+ /* month, 1 - 12 */
+ buf[CCR_MONTH] = bin2bcd(tm->tm_mon + 1);
- /* month, 1 - 12 */
- buf[CCR_MONTH] = bin2bcd(tm->tm_mon + 1);
-
- /* year, since the rtc epoch*/
- buf[CCR_YEAR] = bin2bcd(tm->tm_year % 100);
- buf[CCR_WDAY] = tm->tm_wday & 0x07;
- buf[CCR_Y2K] = bin2bcd((tm->tm_year + 1900) / 100);
- }
+ /* year, since the rtc epoch*/
+ buf[CCR_YEAR] = bin2bcd(tm->tm_year % 100);
+ buf[CCR_WDAY] = tm->tm_wday & 0x07;
+ buf[CCR_Y2K] = bin2bcd((tm->tm_year + 1900) / 100);
/* If writing alarm registers, set compare bits on registers 0-4 */
if (reg_base < X1205_CCR_BASE)
@@ -214,17 +206,8 @@ static int x1205_set_datetime(struct i2c_client *client, struct rtc_time *tm,
return -EIO;
}
-
- /* write register's data */
- if (datetoo)
- nbytes = 8;
- else
- nbytes = 3;
- for (i = 0; i < nbytes; i++)
- rdata[2+i] = buf[i];
-
- xfer = i2c_master_send(client, rdata, nbytes+2);
- if (xfer != nbytes+2) {
+ xfer = i2c_master_send(client, rdata, sizeof(rdata));
+ if (xfer != sizeof(rdata)) {
dev_err(&client->dev,
"%s: result=%d addr=%02x, data=%02x\n",
__func__,
@@ -282,7 +265,7 @@ static int x1205_fix_osc(struct i2c_client *client)
memset(&tm, 0, sizeof(tm));
- err = x1205_set_datetime(client, &tm, 1, X1205_CCR_BASE, 0);
+ err = x1205_set_datetime(client, &tm, X1205_CCR_BASE, 0);
if (err < 0)
dev_err(&client->dev, "unable to restart the oscillator\n");
@@ -481,7 +464,7 @@ static int x1205_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
static int x1205_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
return x1205_set_datetime(to_i2c_client(dev),
- &alrm->time, 1, X1205_ALM0_BASE, alrm->enabled);
+ &alrm->time, X1205_ALM0_BASE, alrm->enabled);
}
static int x1205_rtc_read_time(struct device *dev, struct rtc_time *tm)
@@ -493,7 +476,7 @@ static int x1205_rtc_read_time(struct device *dev, struct rtc_time *tm)
static int x1205_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
return x1205_set_datetime(to_i2c_client(dev),
- tm, 1, X1205_CCR_BASE, 0);
+ tm, X1205_CCR_BASE, 0);
}
static int x1205_rtc_proc(struct device *dev, struct seq_file *seq)
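
The rewritten x1205_set_datetime() drops the separate copy loop by letting buf alias rdata + 2, so the date/time bytes are written straight into the payload of the single i2c_master_send() buffer. A standalone sketch of that layout, with the CCR_* indices shortened to three fields and an illustrative register base:

#include <stdio.h>

#define CCR_SEC  0
#define CCR_MIN  1
#define CCR_HOUR 2

static unsigned char bin2bcd(unsigned char val)
{
	return ((val / 10) << 4) | (val % 10);
}

int main(void)
{
	unsigned char reg_base = 0x30;			/* illustrative */
	unsigned char rdata[2 + 3] = { 0, reg_base };	/* 2 address bytes + payload */
	unsigned char *buf = rdata + 2;			/* payload alias */
	unsigned int i;

	buf[CCR_SEC]  = bin2bcd(7);
	buf[CCR_MIN]  = bin2bcd(59);
	buf[CCR_HOUR] = bin2bcd(23) | 0x80;	/* 24-hour bit, like X1205_HR_MIL */

	/* in the driver this whole buffer goes out in one i2c_master_send() */
	for (i = 0; i < sizeof(rdata); i++)
		printf("%02x ", rdata[i]);
	printf("\n");
	return 0;
}
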
diff --git a/drivers/usb/host/isp1362-hcd.c b/drivers/usb/host/isp1362-hcd.c
index 5c774ab9825..73352f3739b 100644
--- a/drivers/usb/host/isp1362-hcd.c
+++ b/drivers/usb/host/isp1362-hcd.c
@@ -80,7 +80,7 @@
#include
#include
#include
-#include
+#include
#include
#include
@@ -190,10 +190,8 @@ static int claim_ptd_buffers(struct isp1362_ep_queue *epq,
struct isp1362_ep *ep, u16 len)
{
int ptd_offset = -EINVAL;
- int index;
int num_ptds = ((len + PTD_HEADER_SIZE - 1) / epq->blk_size) + 1;
- int found = -1;
- int last = -1;
+ int found;
BUG_ON(len > epq->buf_size);
@@ -205,20 +203,9 @@ static int claim_ptd_buffers(struct isp1362_ep_queue *epq,
epq->name, len, epq->blk_size, num_ptds, epq->buf_map, epq->skip_map);
BUG_ON(ep->num_ptds != 0);
- for (index = 0; index <= epq->buf_count - num_ptds; index++) {
- if (test_bit(index, &epq->buf_map))
- continue;
- found = index;
- for (last = index + 1; last < index + num_ptds; last++) {
- if (test_bit(last, &epq->buf_map)) {
- found = -1;
- break;
- }
- }
- if (found >= 0)
- break;
- }
- if (found < 0)
+ found = bitmap_find_next_zero_area(&epq->buf_map, epq->buf_count, 0,
+ num_ptds, 0);
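+ /* bitmap_find_next_zero_area() returns an index past the end of the
+ * map when no run of num_ptds free blocks exists, hence the >= check.
+ */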
+ if (found >= epq->buf_count)
return -EOVERFLOW;
DBG(1, "%s: Found %d PTDs[%d] for %d/%d byte\n", __func__,
@@ -230,8 +217,7 @@ static int claim_ptd_buffers(struct isp1362_ep_queue *epq,
epq->buf_avail -= num_ptds;
BUG_ON(epq->buf_avail > epq->buf_count);
ep->ptd_index = found;
- for (index = found; index < last; index++)
- __set_bit(index, &epq->buf_map);
+ bitmap_set(&epq->buf_map, found, num_ptds);
DBG(1, "%s: Done %s PTD[%d] $%04x, avail %d count %d claimed %d %08lx:%08lx\n",
__func__, epq->name, ep->ptd_index, ep->ptd_offset,
epq->buf_avail, epq->buf_count, num_ptds, epq->buf_map, epq->skip_map);
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 99c0df1c7eb..5a5c303a637 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -614,6 +614,21 @@ config FB_BFIN_T350MCQB
This display is a QVGA 320x240 24-bit RGB display interfaced by an 8-bit wide PPI
It uses PPI[0..7] PPI_FS1, PPI_FS2 and PPI_CLK.
+config FB_BFIN_LQ035Q1
+ tristate "SHARP LQ035Q1DH02 TFT LCD"
+ depends on FB && BLACKFIN && SPI
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
+ select BFIN_GPTIMERS
+ help
+ This is the framebuffer device driver for a SHARP LQ035Q1DH02 TFT display found on
+ the Blackfin Landscape LCD EZ-Extender Card.
+ This display is a QVGA 320x240 18-bit RGB display interfaced by a 16-bit wide PPI.
+ It uses PPI[0..15], PPI_FS1, PPI_FS2 and PPI_CLK.
+
+ To compile this driver as a module, choose M here: the
+ module will be called bfin-lq035q1-fb.
config FB_STI
tristate "HP STI frame buffer device support"
diff --git a/drivers/video/Makefile b/drivers/video/Makefile
index 0f8da331ba0..4ecb30c4f3f 100644
--- a/drivers/video/Makefile
+++ b/drivers/video/Makefile
@@ -137,6 +137,7 @@ obj-$(CONFIG_FB_EFI) += efifb.o
obj-$(CONFIG_FB_VGA16) += vga16fb.o
obj-$(CONFIG_FB_OF) += offb.o
obj-$(CONFIG_FB_BF54X_LQ043) += bf54x-lq043fb.o
+obj-$(CONFIG_FB_BFIN_LQ035Q1) += bfin-lq035q1-fb.o
obj-$(CONFIG_FB_BFIN_T350MCQB) += bfin-t350mcqb-fb.o
obj-$(CONFIG_FB_MX3) += mx3fb.o
obj-$(CONFIG_FB_DA8XX) += da8xx-fb.o
diff --git a/drivers/video/atafb.c b/drivers/video/atafb.c
index b7687c55fe1..2051c9dc813 100644
--- a/drivers/video/atafb.c
+++ b/drivers/video/atafb.c
@@ -2245,6 +2245,9 @@ static int ext_setcolreg(unsigned int regno, unsigned int red,
if (regno > 255)
return 1;
+ if (regno > 255)
+ return 1;
+
switch (external_card_type) {
case IS_VGA:
OUTB(0x3c8, regno);
diff --git a/drivers/video/bfin-lq035q1-fb.c b/drivers/video/bfin-lq035q1-fb.c
new file mode 100644
index 00000000000..b690c269784
--- /dev/null
+++ b/drivers/video/bfin-lq035q1-fb.c
@@ -0,0 +1,826 @@
+/*
+ * Blackfin LCD Framebuffer driver SHARP LQ035Q1DH02
+ *
+ * Copyright 2008-2009 Analog Devices Inc.
+ * Licensed under the GPL-2 or later.
+ */
+
+#define DRIVER_NAME "bfin-lq035q1"
+#define pr_fmt(fmt) DRIVER_NAME ": " fmt
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+#include
+
+#include
+
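+/* HSYNC and VSYNC are generated by two general purpose timers in PWM mode
+ * (see bfin_lq035q1_init_timers()); the timer assignment differs between
+ * Blackfin derivatives, hence the per-family mapping below.
+ */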
+#if defined(BF533_FAMILY) || defined(BF538_FAMILY)
+#define TIMER_HSYNC_id TIMER1_id
+#define TIMER_HSYNCbit TIMER1bit
+#define TIMER_HSYNC_STATUS_TRUN TIMER_STATUS_TRUN1
+#define TIMER_HSYNC_STATUS_TIMIL TIMER_STATUS_TIMIL1
+#define TIMER_HSYNC_STATUS_TOVF TIMER_STATUS_TOVF1
+
+#define TIMER_VSYNC_id TIMER2_id
+#define TIMER_VSYNCbit TIMER2bit
+#define TIMER_VSYNC_STATUS_TRUN TIMER_STATUS_TRUN2
+#define TIMER_VSYNC_STATUS_TIMIL TIMER_STATUS_TIMIL2
+#define TIMER_VSYNC_STATUS_TOVF TIMER_STATUS_TOVF2
+#else
+#define TIMER_HSYNC_id TIMER0_id
+#define TIMER_HSYNCbit TIMER0bit
+#define TIMER_HSYNC_STATUS_TRUN TIMER_STATUS_TRUN0
+#define TIMER_HSYNC_STATUS_TIMIL TIMER_STATUS_TIMIL0
+#define TIMER_HSYNC_STATUS_TOVF TIMER_STATUS_TOVF0
+
+#define TIMER_VSYNC_id TIMER1_id
+#define TIMER_VSYNCbit TIMER1bit
+#define TIMER_VSYNC_STATUS_TRUN TIMER_STATUS_TRUN1
+#define TIMER_VSYNC_STATUS_TIMIL TIMER_STATUS_TIMIL1
+#define TIMER_VSYNC_STATUS_TOVF TIMER_STATUS_TOVF1
+#endif
+
+#define LCD_X_RES 320 /* Horizontal Resolution */
+#define LCD_Y_RES 240 /* Vertical Resolution */
+#define DMA_BUS_SIZE 16
+
+#define USE_RGB565_16_BIT_PPI
+
+#ifdef USE_RGB565_16_BIT_PPI
+#define LCD_BPP 16 /* Bit Per Pixel */
+#define CLOCKS_PER_PIX 1
+#define CPLD_PIPELINE_DELAY_COR 0 /* NO CPLD */
+#endif
+
+/* Interface 16/18-bit TFT over an 8-bit wide PPI using a small Programmable Logic Device (CPLD)
+ * http://blackfin.uclinux.org/gf/project/stamp/frs/?action=FrsReleaseBrowse&frs_package_id=165
+ */
+
+#ifdef USE_RGB565_8_BIT_PPI
+#define LCD_BPP 16 /* Bit Per Pixel */
+#define CLOCKS_PER_PIX 2
+#define CPLD_PIPELINE_DELAY_COR 3 /* RGB565 */
+#endif
+
+#ifdef USE_RGB888_8_BIT_PPI
+#define LCD_BPP 24 /* Bit Per Pixel */
+#define CLOCKS_PER_PIX 3
+#define CPLD_PIPELINE_DELAY_COR 5 /* RGB888 */
+#endif
+
+ /*
+ * HS and VS timing parameters (all in number of PPI clk ticks)
+ */
+
+#define U_LINE 4 /* Blanking Lines */
+
+#define H_ACTPIX (LCD_X_RES * CLOCKS_PER_PIX) /* active horizontal pixel */
+#define H_PERIOD (336 * CLOCKS_PER_PIX) /* HS period */
+#define H_PULSE (2 * CLOCKS_PER_PIX) /* HS pulse width */
+#define H_START (7 * CLOCKS_PER_PIX + CPLD_PIPELINE_DELAY_COR) /* first valid pixel */
+
+#define V_LINES (LCD_Y_RES + U_LINE) /* total vertical lines */
+#define V_PULSE (2 * CLOCKS_PER_PIX) /* VS pulse width (1-5 H_PERIODs) */
+#define V_PERIOD (H_PERIOD * V_LINES) /* VS period */
+
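+/* The frame buffer also covers the U_LINE blanking lines; the visible frame
+ * starts half of them into the buffer, which is the offset applied to
+ * screen_base and smem_start in the probe function below.
+ */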
+#define ACTIVE_VIDEO_MEM_OFFSET ((U_LINE / 2) * LCD_X_RES * (LCD_BPP / 8))
+
+#define BFIN_LCD_NBR_PALETTE_ENTRIES 256
+
+#define PPI_TX_MODE 0x2
+#define PPI_XFER_TYPE_11 0xC
+#define PPI_PORT_CFG_01 0x10
+#define PPI_POLS_1 0x8000
+
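+/* With more than one PPI clock per pixel the data is fed byte-wise through
+ * the CPLD, so the PPI runs 8 bits wide with packing enabled; otherwise it
+ * drives the panel directly with a 16 bit data width.
+ */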
+#if (CLOCKS_PER_PIX > 1)
+#define PPI_PMODE (DLEN_8 | PACK_EN)
+#else
+#define PPI_PMODE (DLEN_16)
+#endif
+
+#define LQ035_INDEX 0x74
+#define LQ035_DATA 0x76
+
+#define LQ035_DRIVER_OUTPUT_CTL 0x1
+#define LQ035_SHUT_CTL 0x11
+
+#define LQ035_DRIVER_OUTPUT_MASK (LQ035_LR | LQ035_TB | LQ035_BGR | LQ035_REV)
+#define LQ035_DRIVER_OUTPUT_DEFAULT (0x2AEF & ~LQ035_DRIVER_OUTPUT_MASK)
+
+#define LQ035_SHUT (1 << 0) /* Shutdown */
+#define LQ035_ON (0 << 0) /* Normal operation */
+
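+/* The board supplies a struct bfin_lq035q1fb_disp_info as platform data;
+ * the driver uses its panel mode flags (e.g. LQ035_BGR) and, when use_bl is
+ * set, the backlight GPIO in gpio_bl.
+ */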
+struct bfin_lq035q1fb_info {
+ struct fb_info *fb;
+ struct device *dev;
+ struct spi_driver spidrv;
+ struct bfin_lq035q1fb_disp_info *disp_info;
+ unsigned char *fb_buffer; /* RGB Buffer */
+ dma_addr_t dma_handle;
+ int lq035_open_cnt;
+ int irq;
+ spinlock_t lock; /* lock */
+ u32 pseudo_pal[16];
+};
+
+static int nocursor;
+module_param(nocursor, int, 0644);
+MODULE_PARM_DESC(nocursor, "cursor enable/disable");
+
+struct spi_control {
+ unsigned short mode;
+};
+
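+/* Write one LCD controller register over SPI: a 3 byte transfer selects the
+ * register index, a second 3 byte transfer carries the 16 bit value.
+ */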
+static int lq035q1_control(struct spi_device *spi, unsigned char reg, unsigned short value)
+{
+ int ret;
+ u8 regs[3] = { LQ035_INDEX, 0, 0 };
+ u8 dat[3] = { LQ035_DATA, 0, 0 };
+
+ if (!spi)
+ return -ENODEV;
+
+ regs[2] = reg;
+ dat[1] = value >> 8;
+ dat[2] = value & 0xFF;
+
+ ret = spi_write(spi, regs, ARRAY_SIZE(regs));
+ ret |= spi_write(spi, dat, ARRAY_SIZE(dat));
+ return ret;
+}
+
+static int __devinit lq035q1_spidev_probe(struct spi_device *spi)
+{
+ int ret;
+ struct spi_control *ctl;
+ struct bfin_lq035q1fb_info *info = container_of(spi->dev.driver,
+ struct bfin_lq035q1fb_info,
+ spidrv.driver);
+
+ ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
+
+ if (!ctl)
+ return -ENOMEM;
+
+ ctl->mode = (info->disp_info->mode &
+ LQ035_DRIVER_OUTPUT_MASK) | LQ035_DRIVER_OUTPUT_DEFAULT;
+
+ ret = lq035q1_control(spi, LQ035_SHUT_CTL, LQ035_ON);
+ ret |= lq035q1_control(spi, LQ035_DRIVER_OUTPUT_CTL, ctl->mode);
+ if (ret)
+ return ret;
+
+ spi_set_drvdata(spi, ctl);
+
+ return 0;
+}
+
+static int lq035q1_spidev_remove(struct spi_device *spi)
+{
+ return lq035q1_control(spi, LQ035_SHUT_CTL, LQ035_SHUT);
+}
+
+#ifdef CONFIG_PM
+static int lq035q1_spidev_suspend(struct spi_device *spi, pm_message_t state)
+{
+ return lq035q1_control(spi, LQ035_SHUT_CTL, LQ035_SHUT);
+}
+
+static int lq035q1_spidev_resume(struct spi_device *spi)
+{
+ int ret;
+ struct spi_control *ctl = spi_get_drvdata(spi);
+
+ ret = lq035q1_control(spi, LQ035_DRIVER_OUTPUT_CTL, ctl->mode);
+ if (ret)
+ return ret;
+
+ return lq035q1_control(spi, LQ035_SHUT_CTL, LQ035_ON);
+}
+#else
+# define lq035q1_spidev_suspend NULL
+# define lq035q1_spidev_resume NULL
+#endif
+
+/* Power down all displays on reboot, poweroff or halt */
+static void lq035q1_spidev_shutdown(struct spi_device *spi)
+{
+ lq035q1_control(spi, LQ035_SHUT_CTL, LQ035_SHUT);
+}
+
+static int lq035q1_backlight(struct bfin_lq035q1fb_info *info, unsigned arg)
+{
+ if (info->disp_info->use_bl)
+ gpio_set_value(info->disp_info->gpio_bl, arg);
+
+ return 0;
+}
+
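+/* Program the PPI for transmit mode with two frame syncs; data length and
+ * packing follow PPI_PMODE, i.e. the CLOCKS_PER_PIX setting above.
+ */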
+static void bfin_lq035q1_config_ppi(struct bfin_lq035q1fb_info *fbi)
+{
+ bfin_write_PPI_DELAY(H_START);
+ bfin_write_PPI_COUNT(H_ACTPIX - 1);
+ bfin_write_PPI_FRAME(V_LINES);
+
+ bfin_write_PPI_CONTROL(PPI_TX_MODE | /* output mode , PORT_DIR */
+ PPI_XFER_TYPE_11 | /* sync mode XFR_TYPE */
+ PPI_PORT_CFG_01 | /* two frame sync PORT_CFG */
+ PPI_PMODE | /* 8/16 bit data length / PACK_EN? */
+ PPI_POLS_1); /* falling edge syncs POLS */
+}
+
+static inline void bfin_lq035q1_disable_ppi(void)
+{
+ bfin_write_PPI_CONTROL(bfin_read_PPI_CONTROL() & ~PORT_EN);
+}
+
+static inline void bfin_lq035q1_enable_ppi(void)
+{
+ bfin_write_PPI_CONTROL(bfin_read_PPI_CONTROL() | PORT_EN);
+}
+
+static void bfin_lq035q1_start_timers(void)
+{
+ enable_gptimers(TIMER_VSYNCbit | TIMER_HSYNCbit);
+}
+
+static void bfin_lq035q1_stop_timers(void)
+{
+ disable_gptimers(TIMER_HSYNCbit | TIMER_VSYNCbit);
+
+ set_gptimer_status(0, TIMER_HSYNC_STATUS_TRUN | TIMER_VSYNC_STATUS_TRUN |
+ TIMER_HSYNC_STATUS_TIMIL | TIMER_VSYNC_STATUS_TIMIL |
+ TIMER_HSYNC_STATUS_TOVF | TIMER_VSYNC_STATUS_TOVF);
+
+}
+
+static void bfin_lq035q1_init_timers(void)
+{
+
+ bfin_lq035q1_stop_timers();
+
+ set_gptimer_period(TIMER_HSYNC_id, H_PERIOD);
+ set_gptimer_pwidth(TIMER_HSYNC_id, H_PULSE);
+ set_gptimer_config(TIMER_HSYNC_id, TIMER_MODE_PWM | TIMER_PERIOD_CNT |
+ TIMER_TIN_SEL | TIMER_CLK_SEL|
+ TIMER_EMU_RUN);
+
+ set_gptimer_period(TIMER_VSYNC_id, V_PERIOD);
+ set_gptimer_pwidth(TIMER_VSYNC_id, V_PULSE);
+ set_gptimer_config(TIMER_VSYNC_id, TIMER_MODE_PWM | TIMER_PERIOD_CNT |
+ TIMER_TIN_SEL | TIMER_CLK_SEL |
+ TIMER_EMU_RUN);
+
+}
+
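+/* Set up a 2D autobuffer DMA that continuously feeds the PPI: the X count
+ * covers one line in DMA bus sized words, the Y count covers all lines
+ * including the blanking area.
+ */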
+static void bfin_lq035q1_config_dma(struct bfin_lq035q1fb_info *fbi)
+{
+
+ set_dma_config(CH_PPI,
+ set_bfin_dma_config(DIR_READ, DMA_FLOW_AUTO,
+ INTR_DISABLE, DIMENSION_2D,
+ DATA_SIZE_16,
+ DMA_NOSYNC_KEEP_DMA_BUF));
+ set_dma_x_count(CH_PPI, (LCD_X_RES * LCD_BPP) / DMA_BUS_SIZE);
+ set_dma_x_modify(CH_PPI, DMA_BUS_SIZE / 8);
+ set_dma_y_count(CH_PPI, V_LINES);
+
+ set_dma_y_modify(CH_PPI, DMA_BUS_SIZE / 8);
+ set_dma_start_addr(CH_PPI, (unsigned long)fbi->fb_buffer);
+
+}
+
+#if (CLOCKS_PER_PIX == 1)
+static const u16 ppi0_req_16[] = {P_PPI0_CLK, P_PPI0_FS1, P_PPI0_FS2,
+ P_PPI0_D0, P_PPI0_D1, P_PPI0_D2,
+ P_PPI0_D3, P_PPI0_D4, P_PPI0_D5,
+ P_PPI0_D6, P_PPI0_D7, P_PPI0_D8,
+ P_PPI0_D9, P_PPI0_D10, P_PPI0_D11,
+ P_PPI0_D12, P_PPI0_D13, P_PPI0_D14,
+ P_PPI0_D15, 0};
+#else
+static const u16 ppi0_req_16[] = {P_PPI0_CLK, P_PPI0_FS1, P_PPI0_FS2,
+ P_PPI0_D0, P_PPI0_D1, P_PPI0_D2,
+ P_PPI0_D3, P_PPI0_D4, P_PPI0_D5,
+ P_PPI0_D6, P_PPI0_D7, 0};
+#endif
+
+static inline void bfin_lq035q1_free_ports(void)
+{
+ peripheral_free_list(ppi0_req_16);
+ if (ANOMALY_05000400)
+ gpio_free(P_IDENT(P_PPI0_FS3));
+}
+
+static int __devinit bfin_lq035q1_request_ports(struct platform_device *pdev)
+{
+ /* ANOMALY_05000400 - PPI Does Not Start Properly In Specific Mode:
+ * Drive PPI_FS3 Low
+ */
+ if (ANOMALY_05000400) {
+ int ret = gpio_request(P_IDENT(P_PPI0_FS3), "PPI_FS3");
+ if (ret)
+ return ret;
+ gpio_direction_output(P_IDENT(P_PPI0_FS3), 0);
+ }
+
+ if (peripheral_request_list(ppi0_req_16, DRIVER_NAME)) {
+ dev_err(&pdev->dev, "requesting peripherals failed\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
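+/* The panel is only driven while the device is open: the first open sets up
+ * DMA, PPI and the sync timers and switches the backlight on, and the last
+ * release in bfin_lq035q1_fb_release() tears everything down again.
+ */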
+static int bfin_lq035q1_fb_open(struct fb_info *info, int user)
+{
+ struct bfin_lq035q1fb_info *fbi = info->par;
+
+ spin_lock(&fbi->lock);
+ fbi->lq035_open_cnt++;
+
+ if (fbi->lq035_open_cnt <= 1) {
+
+ bfin_lq035q1_disable_ppi();
+ SSYNC();
+
+ bfin_lq035q1_config_dma(fbi);
+ bfin_lq035q1_config_ppi(fbi);
+ bfin_lq035q1_init_timers();
+
+ /* start dma */
+ enable_dma(CH_PPI);
+ bfin_lq035q1_enable_ppi();
+ bfin_lq035q1_start_timers();
+ lq035q1_backlight(fbi, 1);
+ }
+
+ spin_unlock(&fbi->lock);
+
+ return 0;
+}
+
+static int bfin_lq035q1_fb_release(struct fb_info *info, int user)
+{
+ struct bfin_lq035q1fb_info *fbi = info->par;
+
+ spin_lock(&fbi->lock);
+
+ fbi->lq035_open_cnt--;
+
+ if (fbi->lq035_open_cnt <= 0) {
+ lq035q1_backlight(fbi, 0);
+ bfin_lq035q1_disable_ppi();
+ SSYNC();
+ disable_dma(CH_PPI);
+ bfin_lq035q1_stop_timers();
+ }
+
+ spin_unlock(&fbi->lock);
+
+ return 0;
+}
+
+static int bfin_lq035q1_fb_check_var(struct fb_var_screeninfo *var,
+ struct fb_info *info)
+{
+ switch (var->bits_per_pixel) {
+#if (LCD_BPP == 24)
+ case 24:/* TRUECOLOUR, 16m */
+#else
+ case 16:/* DIRECTCOLOUR, 64k */
+#endif
+ var->red.offset = info->var.red.offset;
+ var->green.offset = info->var.green.offset;
+ var->blue.offset = info->var.blue.offset;
+ var->red.length = info->var.red.length;
+ var->green.length = info->var.green.length;
+ var->blue.length = info->var.blue.length;
+ var->transp.offset = 0;
+ var->transp.length = 0;
+ var->transp.msb_right = 0;
+ var->red.msb_right = 0;
+ var->green.msb_right = 0;
+ var->blue.msb_right = 0;
+ break;
+ default:
+ pr_debug("%s: depth not supported: %u BPP\n", __func__,
+ var->bits_per_pixel);
+ return -EINVAL;
+ }
+
+ if (info->var.xres != var->xres || info->var.yres != var->yres ||
+ info->var.xres_virtual != var->xres_virtual ||
+ info->var.yres_virtual != var->yres_virtual) {
+ pr_debug("%s: Resolution not supported: X%u x Y%u \n",
+ __func__, var->xres, var->yres);
+ return -EINVAL;
+ }
+
+ /*
+ * Memory limit
+ */
+
+ if ((info->fix.line_length * var->yres_virtual) > info->fix.smem_len) {
+ pr_debug("%s: Memory Limit requested yres_virtual = %u\n",
+ __func__, var->yres_virtual);
+ return -ENOMEM;
+ }
+
+
+ return 0;
+}
+
+int bfin_lq035q1_fb_cursor(struct fb_info *info, struct fb_cursor *cursor)
+{
+ if (nocursor)
+ return 0;
+ else
+ return -EINVAL; /* just to force soft_cursor() call */
+}
+
+static int bfin_lq035q1_fb_setcolreg(u_int regno, u_int red, u_int green,
+ u_int blue, u_int transp,
+ struct fb_info *info)
+{
+ if (regno >= BFIN_LCD_NBR_PALETTE_ENTRIES)
+ return -EINVAL;
+
+ if (info->var.grayscale) {
+ /* grayscale = 0.30*R + 0.59*G + 0.11*B */
+ red = green = blue = (red * 77 + green * 151 + blue * 28) >> 8;
+ }
+
+ if (info->fix.visual == FB_VISUAL_TRUECOLOR) {
+
+ u32 value;
+ /* Place color in the pseudopalette */
+ if (regno >= 16)
+ return -EINVAL;
+
+ red >>= (16 - info->var.red.length);
+ green >>= (16 - info->var.green.length);
+ blue >>= (16 - info->var.blue.length);
+
+ value = (red << info->var.red.offset) |
+ (green << info->var.green.offset) |
+ (blue << info->var.blue.offset);
+ value &= 0xFFFFFF;
+
+ ((u32 *) (info->pseudo_palette))[regno] = value;
+
+ }
+
+ return 0;
+}
+
+static struct fb_ops bfin_lq035q1_fb_ops = {
+ .owner = THIS_MODULE,
+ .fb_open = bfin_lq035q1_fb_open,
+ .fb_release = bfin_lq035q1_fb_release,
+ .fb_check_var = bfin_lq035q1_fb_check_var,
+ .fb_fillrect = cfb_fillrect,
+ .fb_copyarea = cfb_copyarea,
+ .fb_imageblit = cfb_imageblit,
+ .fb_cursor = bfin_lq035q1_fb_cursor,
+ .fb_setcolreg = bfin_lq035q1_fb_setcolreg,
+};
+
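+/* PPI error interrupt: if an error status is pending, restart the transfer
+ * by cycling DMA and the PPI, then clear the status register again.
+ */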
+static irqreturn_t bfin_lq035q1_irq_error(int irq, void *dev_id)
+{
+ /*struct bfin_lq035q1fb_info *info = (struct bfin_lq035q1fb_info *)dev_id;*/
+
+ u16 status = bfin_read_PPI_STATUS();
+ bfin_write_PPI_STATUS(-1);
+
+ if (status) {
+ bfin_lq035q1_disable_ppi();
+ disable_dma(CH_PPI);
+
+ /* start dma */
+ enable_dma(CH_PPI);
+ bfin_lq035q1_enable_ppi();
+ bfin_write_PPI_STATUS(-1);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int __devinit bfin_lq035q1_probe(struct platform_device *pdev)
+{
+ struct bfin_lq035q1fb_info *info;
+ struct fb_info *fbinfo;
+ int ret;
+
+ ret = request_dma(CH_PPI, DRIVER_NAME"_CH_PPI");
+ if (ret < 0) {
+ dev_err(&pdev->dev, "PPI DMA unavailable\n");
+ goto out1;
+ }
+
+ fbinfo = framebuffer_alloc(sizeof(*info), &pdev->dev);
+ if (!fbinfo) {
+ ret = -ENOMEM;
+ goto out2;
+ }
+
+ info = fbinfo->par;
+ info->fb = fbinfo;
+ info->dev = &pdev->dev;
+
+ info->disp_info = pdev->dev.platform_data;
+
+ platform_set_drvdata(pdev, fbinfo);
+
+ strcpy(fbinfo->fix.id, DRIVER_NAME);
+
+ fbinfo->fix.type = FB_TYPE_PACKED_PIXELS;
+ fbinfo->fix.type_aux = 0;
+ fbinfo->fix.xpanstep = 0;
+ fbinfo->fix.ypanstep = 0;
+ fbinfo->fix.ywrapstep = 0;
+ fbinfo->fix.accel = FB_ACCEL_NONE;
+ fbinfo->fix.visual = FB_VISUAL_TRUECOLOR;
+
+ fbinfo->var.nonstd = 0;
+ fbinfo->var.activate = FB_ACTIVATE_NOW;
+ fbinfo->var.height = -1;
+ fbinfo->var.width = -1;
+ fbinfo->var.accel_flags = 0;
+ fbinfo->var.vmode = FB_VMODE_NONINTERLACED;
+
+ fbinfo->var.xres = LCD_X_RES;
+ fbinfo->var.xres_virtual = LCD_X_RES;
+ fbinfo->var.yres = LCD_Y_RES;
+ fbinfo->var.yres_virtual = LCD_Y_RES;
+ fbinfo->var.bits_per_pixel = LCD_BPP;
+
+ if (info->disp_info->mode & LQ035_BGR) {
+#if (LCD_BPP == 24)
+ fbinfo->var.red.offset = 0;
+ fbinfo->var.green.offset = 8;
+ fbinfo->var.blue.offset = 16;
+#else
+ fbinfo->var.red.offset = 0;
+ fbinfo->var.green.offset = 5;
+ fbinfo->var.blue.offset = 11;
+#endif
+ } else {
+#if (LCD_BPP == 24)
+ fbinfo->var.red.offset = 16;
+ fbinfo->var.green.offset = 8;
+ fbinfo->var.blue.offset = 0;
+#else
+ fbinfo->var.red.offset = 11;
+ fbinfo->var.green.offset = 5;
+ fbinfo->var.blue.offset = 0;
+#endif
+ }
+
+ fbinfo->var.transp.offset = 0;
+
+#if (LCD_BPP == 24)
+ fbinfo->var.red.length = 8;
+ fbinfo->var.green.length = 8;
+ fbinfo->var.blue.length = 8;
+#else
+ fbinfo->var.red.length = 5;
+ fbinfo->var.green.length = 6;
+ fbinfo->var.blue.length = 5;
+#endif
+
+ fbinfo->var.transp.length = 0;
+
+ fbinfo->fix.smem_len = LCD_X_RES * LCD_Y_RES * LCD_BPP / 8
+ + ACTIVE_VIDEO_MEM_OFFSET;
+
+ fbinfo->fix.line_length = fbinfo->var.xres_virtual *
+ fbinfo->var.bits_per_pixel / 8;
+
+
+ fbinfo->fbops = &bfin_lq035q1_fb_ops;
+ fbinfo->flags = FBINFO_FLAG_DEFAULT;
+
+ info->fb_buffer =
+ dma_alloc_coherent(NULL, fbinfo->fix.smem_len, &info->dma_handle,
+ GFP_KERNEL);
+
+ if (NULL == info->fb_buffer) {
+ dev_err(&pdev->dev, "couldn't allocate dma buffer\n");
+ ret = -ENOMEM;
+ goto out3;
+ }
+
+ fbinfo->screen_base = (void *)info->fb_buffer + ACTIVE_VIDEO_MEM_OFFSET;
+ fbinfo->fix.smem_start = (int)info->fb_buffer + ACTIVE_VIDEO_MEM_OFFSET;
+
+ fbinfo->pseudo_palette = &info->pseudo_pal;
+
+ ret = fb_alloc_cmap(&fbinfo->cmap, BFIN_LCD_NBR_PALETTE_ENTRIES, 0);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to allocate colormap (%d entries)\n",
+ BFIN_LCD_NBR_PALETTE_ENTRIES);
+ goto out4;
+ }
+
+ ret = bfin_lq035q1_request_ports(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "couldn't request gpio port\n");
+ goto out6;
+ }
+
+ info->irq = platform_get_irq(pdev, 0);
+ if (info->irq < 0) {
+ ret = -EINVAL;
+ goto out7;
+ }
+
+ ret = request_irq(info->irq, bfin_lq035q1_irq_error, IRQF_DISABLED,
+ DRIVER_NAME" PPI ERROR", info);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "unable to request PPI ERROR IRQ\n");
+ goto out7;
+ }
+
+ info->spidrv.driver.name = DRIVER_NAME"-spi";
+ info->spidrv.probe = lq035q1_spidev_probe;
+ info->spidrv.remove = __devexit_p(lq035q1_spidev_remove);
+ info->spidrv.shutdown = lq035q1_spidev_shutdown;
+ info->spidrv.suspend = lq035q1_spidev_suspend;
+ info->spidrv.resume = lq035q1_spidev_resume;
+
+ ret = spi_register_driver(&info->spidrv);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "couldn't register SPI Interface\n");
+ goto out8;
+ }
+
+ if (info->disp_info->use_bl) {
+ ret = gpio_request(info->disp_info->gpio_bl, "LQ035 Backlight");
+
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request GPIO %d\n",
+ info->disp_info->gpio_bl);
+ goto out9;
+ }
+ gpio_direction_output(info->disp_info->gpio_bl, 0);
+ }
+
+ ret = register_framebuffer(fbinfo);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "unable to register framebuffer\n");
+ goto out10;
+ }
+
+ dev_info(&pdev->dev, "%dx%d %d-bit RGB FrameBuffer initialized\n",
+ LCD_X_RES, LCD_Y_RES, LCD_BPP);
+
+ return 0;
+
+ out10:
+ if (info->disp_info->use_bl)
+ gpio_free(info->disp_info->gpio_bl);
+ out9:
+ spi_unregister_driver(&info->spidrv);
+ out8:
+ free_irq(info->irq, info);
+ out7:
+ bfin_lq035q1_free_ports();
+ out6:
+ fb_dealloc_cmap(&fbinfo->cmap);
+ out4:
+ dma_free_coherent(NULL, fbinfo->fix.smem_len, info->fb_buffer,
+ info->dma_handle);
+ out3:
+ framebuffer_release(fbinfo);
+ out2:
+ free_dma(CH_PPI);
+ out1:
+ platform_set_drvdata(pdev, NULL);
+
+ return ret;
+}
+
+static int __devexit bfin_lq035q1_remove(struct platform_device *pdev)
+{
+ struct fb_info *fbinfo = platform_get_drvdata(pdev);
+ struct bfin_lq035q1fb_info *info = fbinfo->par;
+
+ if (info->disp_info->use_bl)
+ gpio_free(info->disp_info->gpio_bl);
+
+ spi_unregister_driver(&info->spidrv);
+
+ unregister_framebuffer(fbinfo);
+
+ free_dma(CH_PPI);
+ free_irq(info->irq, info);
+
+ if (info->fb_buffer != NULL)
+ dma_free_coherent(NULL, fbinfo->fix.smem_len, info->fb_buffer,
+ info->dma_handle);
+
+ fb_dealloc_cmap(&fbinfo->cmap);
+
+ bfin_lq035q1_free_ports();
+
+ platform_set_drvdata(pdev, NULL);
+ framebuffer_release(fbinfo);
+
+ dev_info(&pdev->dev, "unregistered LCD driver\n");
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int bfin_lq035q1_suspend(struct device *dev)
+{
+ struct fb_info *fbinfo = dev_get_drvdata(dev);
+ struct bfin_lq035q1fb_info *info = fbinfo->par;
+
+ if (info->lq035_open_cnt) {
+ lq035q1_backlight(info, 0);
+ bfin_lq035q1_disable_ppi();
+ SSYNC();
+ disable_dma(CH_PPI);
+ bfin_lq035q1_stop_timers();
+ bfin_write_PPI_STATUS(-1);
+ }
+
+ return 0;
+}
+
+static int bfin_lq035q1_resume(struct device *dev)
+{
+ struct fb_info *fbinfo = dev_get_drvdata(dev);
+ struct bfin_lq035q1fb_info *info = fbinfo->par;
+
+ if (info->lq035_open_cnt) {
+ bfin_lq035q1_disable_ppi();
+ SSYNC();
+
+ bfin_lq035q1_config_dma(info);
+ bfin_lq035q1_config_ppi(info);
+ bfin_lq035q1_init_timers();
+
+ /* start dma */
+ enable_dma(CH_PPI);
+ bfin_lq035q1_enable_ppi();
+ bfin_lq035q1_start_timers();
+ lq035q1_backlight(info, 1);
+ }
+
+ return 0;
+}
+
+static struct dev_pm_ops bfin_lq035q1_dev_pm_ops = {
+ .suspend = bfin_lq035q1_suspend,
+ .resume = bfin_lq035q1_resume,
+};
+#endif
+
+static struct platform_driver bfin_lq035q1_driver = {
+ .probe = bfin_lq035q1_probe,
+ .remove = __devexit_p(bfin_lq035q1_remove),
+ .driver = {
+ .name = DRIVER_NAME,
+#ifdef CONFIG_PM
+ .pm = &bfin_lq035q1_dev_pm_ops,
+#endif
+ },
+};
+
+static int __init bfin_lq035q1_driver_init(void)
+{
+ return platform_driver_register(&bfin_lq035q1_driver);
+}
+module_init(bfin_lq035q1_driver_init);
+
+static void __exit bfin_lq035q1_driver_cleanup(void)
+{
+ platform_driver_unregister(&bfin_lq035q1_driver);
+}
+module_exit(bfin_lq035q1_driver_cleanup);
+
+MODULE_DESCRIPTION("Blackfin TFT LCD Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/video/bfin-t350mcqb-fb.c b/drivers/video/bfin-t350mcqb-fb.c
index 5cc36cfbf07..2549c53b26a 100644
--- a/drivers/video/bfin-t350mcqb-fb.c
+++ b/drivers/video/bfin-t350mcqb-fb.c
@@ -487,8 +487,8 @@ static int __devinit bfin_t350mcqb_probe(struct platform_device *pdev)
fbinfo->var.nonstd = 0;
fbinfo->var.activate = FB_ACTIVATE_NOW;
- fbinfo->var.height = -1;
- fbinfo->var.width = -1;
+ fbinfo->var.height = 53;
+ fbinfo->var.width = 70;
fbinfo->var.accel_flags = 0;
fbinfo->var.vmode = FB_VMODE_NONINTERLACED;
@@ -634,17 +634,35 @@ static int __devexit bfin_t350mcqb_remove(struct platform_device *pdev)
#ifdef CONFIG_PM
static int bfin_t350mcqb_suspend(struct platform_device *pdev, pm_message_t state)
{
- bfin_t350mcqb_disable_ppi();
- disable_dma(CH_PPI);
- bfin_write_PPI_STATUS(0xFFFF);
+ struct fb_info *fbinfo = platform_get_drvdata(pdev);
+ struct bfin_t350mcqbfb_info *fbi = fbinfo->par;
+
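+ /* Only touch the hardware if the framebuffer is actually in use */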
+ if (fbi->lq043_open_cnt) {
+ bfin_t350mcqb_disable_ppi();
+ disable_dma(CH_PPI);
+ bfin_t350mcqb_stop_timers();
+ bfin_write_PPI_STATUS(-1);
+ }
+
return 0;
}
static int bfin_t350mcqb_resume(struct platform_device *pdev)
{
- enable_dma(CH_PPI);
- bfin_t350mcqb_enable_ppi();
+ struct fb_info *fbinfo = platform_get_drvdata(pdev);
+ struct bfin_t350mcqbfb_info *fbi = fbinfo->par;
+
+ if (fbi->lq043_open_cnt) {
+ bfin_t350mcqb_config_dma(fbi);
+ bfin_t350mcqb_config_ppi(fbi);
+ bfin_t350mcqb_init_timers();
+
+ /* start dma */
+ enable_dma(CH_PPI);
+ bfin_t350mcqb_enable_ppi();
+ bfin_t350mcqb_start_timers();
+ }
return 0;
}
diff --git a/drivers/video/clps711xfb.c b/drivers/video/clps711xfb.c
index 16f5db471ab..99b354b8e25 100644
--- a/drivers/video/clps711xfb.c
+++ b/drivers/video/clps711xfb.c
@@ -19,8 +19,10 @@
*
* Framebuffer driver for the CLPS7111 and EP7212 processors.
*/
+#include
#include
#include
+#include
#include
#include
#include
@@ -38,14 +40,6 @@ struct fb_info *cfb;
#define CMAP_MAX_SIZE 16
-/* The /proc entry for the backlight. */
-static struct proc_dir_entry *clps7111fb_backlight_proc_entry = NULL;
-
-static int clps7111fb_proc_backlight_read(char *page, char **start, off_t off,
- int count, int *eof, void *data);
-static int clps7111fb_proc_backlight_write(struct file *file,
- const char *buffer, unsigned long count, void *data);
-
/*
* LCD AC Prescale. This comes from the LCD panel manufacturers specifications.
* This determines how many clocks + 1 of CL1 before the M signal toggles.
@@ -221,26 +215,23 @@ static struct fb_ops clps7111fb_ops = {
.fb_imageblit = cfb_imageblit,
};
-static int
-clps7111fb_proc_backlight_read(char *page, char **start, off_t off,
- int count, int *eof, void *data)
+static int backlight_proc_show(struct seq_file *m, void *v)
{
- /* We need at least two characters, one for the digit, and one for
- * the terminating NULL. */
- if (count < 2)
- return -EINVAL;
-
if (machine_is_edb7211()) {
- return sprintf(page, "%d\n",
+ seq_printf(m, "%d\n",
(clps_readb(PDDR) & EDB_PD3_LCDBL) ? 1 : 0);
}
return 0;
}
-static int
-clps7111fb_proc_backlight_write(struct file *file, const char *buffer,
- unsigned long count, void *data)
+static int backlight_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, backlight_proc_show, NULL);
+}
+
+static ssize_t backlight_proc_write(struct file *file, const char *buffer,
+ size_t count, loff_t *pos)
{
unsigned char char_value;
int value;
@@ -271,6 +262,15 @@ clps7111fb_proc_backlight_write(struct file *file, const char *buffer,
return count;
}
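+/* seq_file based replacement for the old read_proc/write_proc callbacks:
+ * reads go through backlight_proc_show(), writes keep the original handler.
+ */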
+static const struct file_operations backlight_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = backlight_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = backlight_proc_write,
+};
+
static void __init clps711x_guess_lcd_params(struct fb_info *info)
{
unsigned int lcdcon, syscon, size;
@@ -379,19 +379,11 @@ int __init clps711xfb_init(void)
fb_alloc_cmap(&cfb->cmap, CMAP_MAX_SIZE, 0);
- /* Register the /proc entries. */
- clps7111fb_backlight_proc_entry = create_proc_entry("backlight", 0444,
- NULL);
- if (clps7111fb_backlight_proc_entry == NULL) {
+ if (!proc_create("backlight", 0444, NULL, &backlight_proc_fops)) {
printk("Couldn't create the /proc entry for the backlight.\n");
return -EINVAL;
}
- clps7111fb_backlight_proc_entry->read_proc =
- &clps7111fb_proc_backlight_read;
- clps7111fb_backlight_proc_entry->write_proc =
- &clps7111fb_proc_backlight_write;
-
/*
* Power up the LCD
*/
diff --git a/drivers/video/da8xx-fb.c b/drivers/video/da8xx-fb.c
index ea1fd3f4751..369a5b3ac64 100644
--- a/drivers/video/da8xx-fb.c
+++ b/drivers/video/da8xx-fb.c
@@ -28,6 +28,8 @@
#include
#include
#include
+#include
+#include
#include