arch/m68k/include/asm/dma_no.h
Sam Ravnborg 49148020bc m68k,m68knommu: merge header files
Merge header files for m68k and m68knommu to the single location:

    arch/m68k/include/asm

The majority of this patch was the result of the
script that is included in the changelog below.

The script was originally written by Arnd Bergmann and
extended by me to cover a few more files.

When the header files differed, the script used the following naming:

The original m68k file is named <file>_mm.h  [mm for memory manager]
The m68knommu file is named <file>_no.h [no for no memory manager]

The merged files use the following include guard:

    #ifdef __uClinux__
    #include "<file>_no.h"
    #else
    #include "<file>_mm.h"
    #endif

This include guard works because the m68knommu toolchain sets
the __uClinux__ symbol - so this should work in userspace too.

Merging the header files for m68k and m68knommu exposes the
(unexpected?) ABI differences, making it easier to identify
and fix them.

The commit has been build tested with both an m68k and
an m68knommu toolchain - with success.

The commit has also been tested with "make headers_check"
and this patch fixes make headers_check for m68knommu.

The script used:
TARGET=arch/m68k/include/asm
SOURCE=arch/m68knommu/include/asm

INCLUDE="cachectl.h errno.h fcntl.h hwtest.h ioctls.h ipcbuf.h \
linkage.h math-emu.h md.h mman.h movs.h msgbuf.h openprom.h \
oplib.h poll.h posix_types.h resource.h rtc.h sembuf.h shmbuf.h \
shm.h shmparam.h socket.h sockios.h spinlock.h statfs.h stat.h \
termbits.h termios.h tlb.h types.h user.h"

EQUAL="auxvec.h cputime.h device.h emergency-restart.h futex.h \
ioctl.h irq_regs.h kdebug.h local.h mutex.h percpu.h \
sections.h topology.h"

NOMUUFILES="anchor.h bootstd.h coldfire.h commproc.h dbg.h \
elia.h flat.h m5206sim.h m520xsim.h m523xsim.h m5249sim.h \
m5272sim.h m527xsim.h m528xsim.h m5307sim.h m532xsim.h \
m5407sim.h m68360_enet.h m68360.h m68360_pram.h m68360_quicc.h \
m68360_regs.h MC68328.h MC68332.h MC68EZ328.h MC68VZ328.h \
mcfcache.h mcfdma.h mcfmbus.h mcfne.h mcfpci.h mcfpit.h \
mcfsim.h mcfsmc.h mcftimer.h mcfuart.h mcfwdebug.h \
nettel.h quicc_simple.h smp.h"

FILES="atomic.h bitops.h bootinfo.h bug.h bugs.h byteorder.h cache.h \
cacheflush.h checksum.h current.h delay.h div64.h \
dma-mapping.h dma.h elf.h entry.h fb.h fpu.h hardirq.h hw_irq.h io.h \
irq.h kmap_types.h machdep.h mc146818rtc.h mmu.h mmu_context.h \
module.h page.h page_offset.h param.h pci.h pgalloc.h \
pgtable.h processor.h ptrace.h scatterlist.h segment.h \
setup.h sigcontext.h siginfo.h signal.h string.h system.h swab.h \
thread_info.h timex.h tlbflush.h traps.h uaccess.h ucontext.h \
unaligned.h unistd.h"

mergefile() {
	BASE=${1%.h}
	git mv ${SOURCE}/$1 ${TARGET}/${BASE}_no.h
	git mv ${TARGET}/$1 ${TARGET}/${BASE}_mm.h

cat << EOF > ${TARGET}/$1
#ifdef __uClinux__
#include "${BASE}_no.h"
#else
#include "${BASE}_mm.h"
#endif
EOF

	git add ${TARGET}/$1
}

set -e

mkdir -p ${TARGET}

git mv include/asm-m68k/* ${TARGET}
rmdir include/asm-m68k

git rm ${SOURCE}/Kbuild
for F in $INCLUDE $EQUAL; do
	git rm ${SOURCE}/$F
done

for F in $NOMUUFILES; do
	git mv ${SOURCE}/$F ${TARGET}/$F
done

for F in $FILES ; do
	mergefile $F
done

rmdir arch/m68knommu/include/asm
rmdir arch/m68knommu/include

Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Signed-off-by: Sam Ravnborg <sam@ravnborg.org>
Signed-off-by: Greg Ungerer <gerg@uclinux.org>
2009-01-16 21:58:10 +10:00

#ifndef _M68K_DMA_H
#define _M68K_DMA_H 1
//#define DMA_DEBUG 1
#ifdef CONFIG_COLDFIRE
/*
* ColdFire DMA Model:
* ColdFire DMA supports two forms of DMA: Single and Dual address. Single
* address mode emits a source address, and expects that the device will either
* pick up the data (DMA READ) or source data (DMA WRITE). This implies that
* the device will place data on the correct byte(s) of the data bus, as the
* memory transactions are always 32 bits. This implies that only 32 bit
* devices will find single mode transfers useful. Dual address DMA mode
* performs two cycles: source read and destination write. ColdFire will
* align the data so that the device will always get the correct bytes, thus
* is useful for 8 and 16 bit devices. This is the mode that is supported
* below.
*
* AUG/22/2000 : added support for 32-bit Dual-Address-Mode (K) 2000
* Oliver Kamphenkel (O.Kamphenkel@tu-bs.de)
*
* AUG/25/2000 : addad support for 8, 16 and 32-bit Single-Address-Mode (K)2000
* Oliver Kamphenkel (O.Kamphenkel@tu-bs.de)
*
* APR/18/2002 : added proper support for MCF5272 DMA controller.
* Arthur Shipkowski (art@videon-central.com)
*/
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
#include <asm/mcfdma.h>
/*
* Set number of channels of DMA on ColdFire for different implementations.
*/
#if defined(CONFIG_M5249) || defined(CONFIG_M5307) || defined(CONFIG_M5407) || \
    defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x)
#define MAX_M68K_DMA_CHANNELS 4
#elif defined(CONFIG_M5272)
#define MAX_M68K_DMA_CHANNELS 1
#elif defined(CONFIG_M532x)
#define MAX_M68K_DMA_CHANNELS 0
#else
#define MAX_M68K_DMA_CHANNELS 2
#endif
extern unsigned int dma_base_addr[MAX_M68K_DMA_CHANNELS];
extern unsigned int dma_device_address[MAX_M68K_DMA_CHANNELS];
#if !defined(CONFIG_M5272)
#define DMA_MODE_WRITE_BIT 0x01 /* Memory/IO to IO/Memory select */
#define DMA_MODE_WORD_BIT 0x02 /* 8 or 16 bit transfers */
#define DMA_MODE_LONG_BIT 0x04 /* or 32 bit transfers */
#define DMA_MODE_SINGLE_BIT 0x08 /* single-address-mode */
/* I/O to memory, 8 bits, mode */
#define DMA_MODE_READ 0
/* memory to I/O, 8 bits, mode */
#define DMA_MODE_WRITE 1
/* I/O to memory, 16 bits, mode */
#define DMA_MODE_READ_WORD 2
/* memory to I/O, 16 bits, mode */
#define DMA_MODE_WRITE_WORD 3
/* I/O to memory, 32 bits, mode */
#define DMA_MODE_READ_LONG 4
/* memory to I/O, 32 bits, mode */
#define DMA_MODE_WRITE_LONG 5
/* I/O to memory, 8 bits, single-address-mode */
#define DMA_MODE_READ_SINGLE 8
/* memory to I/O, 8 bits, single-address-mode */
#define DMA_MODE_WRITE_SINGLE 9
/* I/O to memory, 16 bits, single-address-mode */
#define DMA_MODE_READ_WORD_SINGLE 10
/* memory to I/O, 16 bits, single-address-mode */
#define DMA_MODE_WRITE_WORD_SINGLE 11
/* I/O to memory, 32 bits, single-address-mode */
#define DMA_MODE_READ_LONG_SINGLE 12
/* memory to I/O, 32 bits, single-address-mode */
#define DMA_MODE_WRITE_LONG_SINGLE 13
#else /* CONFIG_M5272 is defined */
/* Source static-address mode */
#define DMA_MODE_SRC_SA_BIT 0x01
/* Two bits to select between all four modes */
#define DMA_MODE_SSIZE_MASK 0x06
/* Offset to shift bits in */
#define DMA_MODE_SSIZE_OFF 0x01
/* Destination static-address mode */
#define DMA_MODE_DES_SA_BIT 0x10
/* Two bits to select between all four modes */
#define DMA_MODE_DSIZE_MASK 0x60
/* Offset to shift bits in */
#define DMA_MODE_DSIZE_OFF 0x05
/* Size modifiers */
#define DMA_MODE_SIZE_LONG 0x00
#define DMA_MODE_SIZE_BYTE 0x01
#define DMA_MODE_SIZE_WORD 0x02
#define DMA_MODE_SIZE_LINE 0x03
/*
* Aliases to help speed quick ports; these may be suboptimal, however. They
* do not include the SINGLE mode modifiers since the MCF5272 does not have a
* mode where the device is in control of its addressing.
*/
/* I/O to memory, 8 bits, mode */
#define DMA_MODE_READ ((DMA_MODE_SIZE_BYTE << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_BYTE << DMA_MODE_SSIZE_OFF) | DMA_MODE_SRC_SA_BIT)
/* memory to I/O, 8 bits, mode */
#define DMA_MODE_WRITE ((DMA_MODE_SIZE_BYTE << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_BYTE << DMA_MODE_SSIZE_OFF) | DMA_MODE_DES_SA_BIT)
/* I/O to memory, 16 bits, mode */
#define DMA_MODE_READ_WORD ((DMA_MODE_SIZE_WORD << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_WORD << DMA_MODE_SSIZE_OFF) | DMA_MODE_SRC_SA_BIT)
/* memory to I/O, 16 bits, mode */
#define DMA_MODE_WRITE_WORD ((DMA_MODE_SIZE_WORD << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_WORD << DMA_MODE_SSIZE_OFF) | DMA_MODE_DES_SA_BIT)
/* I/O to memory, 32 bits, mode */
#define DMA_MODE_READ_LONG ((DMA_MODE_SIZE_LONG << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_LONG << DMA_MODE_SSIZE_OFF) | DMA_MODE_SRC_SA_BIT)
/* memory to I/O, 32 bits, mode */
#define DMA_MODE_WRITE_LONG ((DMA_MODE_SIZE_LONG << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_LONG << DMA_MODE_SSIZE_OFF) | DMA_MODE_DES_SA_BIT)
#endif /* !defined(CONFIG_M5272) */
#if !defined(CONFIG_M5272)
/* enable/disable a specific DMA channel */
static __inline__ void enable_dma(unsigned int dmanr)
{
	volatile unsigned short *dmawp;

#ifdef DMA_DEBUG
	printk("enable_dma(dmanr=%d)\n", dmanr);
#endif

	dmawp = (unsigned short *) dma_base_addr[dmanr];
	dmawp[MCFDMA_DCR] |= MCFDMA_DCR_EEXT;
}

static __inline__ void disable_dma(unsigned int dmanr)
{
	volatile unsigned short *dmawp;
	volatile unsigned char *dmapb;

#ifdef DMA_DEBUG
	printk("disable_dma(dmanr=%d)\n", dmanr);
#endif

	dmawp = (unsigned short *) dma_base_addr[dmanr];
	dmapb = (unsigned char *) dma_base_addr[dmanr];

	/* Turn off external requests, and stop any DMA in progress */
	dmawp[MCFDMA_DCR] &= ~MCFDMA_DCR_EEXT;
	dmapb[MCFDMA_DSR] = MCFDMA_DSR_DONE;
}
/*
* Clear the 'DMA Pointer Flip Flop'.
* Write 0 for LSB/MSB, 1 for MSB/LSB access.
* Use this once to initialize the FF to a known state.
* After that, keep track of it. :-)
* --- In order to do that, the DMA routines below should ---
* --- only be used while interrupts are disabled! ---
*
* This is a NOP for ColdFire. Provide a stub for compatibility.
*/
static __inline__ void clear_dma_ff(unsigned int dmanr)
{
}
/* set mode (above) for a specific DMA channel */
static __inline__ void set_dma_mode(unsigned int dmanr, char mode)
{
	volatile unsigned char *dmabp;
	volatile unsigned short *dmawp;

#ifdef DMA_DEBUG
	printk("set_dma_mode(dmanr=%d,mode=%d)\n", dmanr, mode);
#endif

	dmabp = (unsigned char *) dma_base_addr[dmanr];
	dmawp = (unsigned short *) dma_base_addr[dmanr];

	// Clear config errors
	dmabp[MCFDMA_DSR] = MCFDMA_DSR_DONE;

	// Set command register
	dmawp[MCFDMA_DCR] =
		MCFDMA_DCR_INT |	// Enable completion irq
		MCFDMA_DCR_CS |		// Force one xfer per request
		MCFDMA_DCR_AA |		// Enable auto alignment
		// single-address-mode
		((mode & DMA_MODE_SINGLE_BIT) ? MCFDMA_DCR_SAA : 0) |
		// sets s_rw (-> r/w) high if Memory to I/O
		((mode & DMA_MODE_WRITE_BIT) ? MCFDMA_DCR_S_RW : 0) |
		// Memory to I/O or I/O to Memory
		((mode & DMA_MODE_WRITE_BIT) ? MCFDMA_DCR_SINC : MCFDMA_DCR_DINC) |
		// 32 bit, 16 bit or 8 bit transfers
		((mode & DMA_MODE_WORD_BIT) ? MCFDMA_DCR_SSIZE_WORD :
			((mode & DMA_MODE_LONG_BIT) ? MCFDMA_DCR_SSIZE_LONG :
				MCFDMA_DCR_SSIZE_BYTE)) |
		((mode & DMA_MODE_WORD_BIT) ? MCFDMA_DCR_DSIZE_WORD :
			((mode & DMA_MODE_LONG_BIT) ? MCFDMA_DCR_DSIZE_LONG :
				MCFDMA_DCR_DSIZE_BYTE));

#ifdef DEBUG_DMA
	printk("%s(%d): dmanr=%d DSR[%x]=%x DCR[%x]=%x\n", __FILE__, __LINE__,
	       dmanr, (int) &dmabp[MCFDMA_DSR], dmabp[MCFDMA_DSR],
	       (int) &dmawp[MCFDMA_DCR], dmawp[MCFDMA_DCR]);
#endif
}
/* Set transfer address for specific DMA channel */
static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a)
{
	volatile unsigned short *dmawp;
	volatile unsigned int *dmalp;

#ifdef DMA_DEBUG
	printk("set_dma_addr(dmanr=%d,a=%x)\n", dmanr, a);
#endif

	dmawp = (unsigned short *) dma_base_addr[dmanr];
	dmalp = (unsigned int *) dma_base_addr[dmanr];

	// Determine which address registers are used for memory/device accesses
	if (dmawp[MCFDMA_DCR] & MCFDMA_DCR_SINC) {
		// Source incrementing, must be memory
		dmalp[MCFDMA_SAR] = a;
		// Set dest address, must be device
		dmalp[MCFDMA_DAR] = dma_device_address[dmanr];
	} else {
		// Destination incrementing, must be memory
		dmalp[MCFDMA_DAR] = a;
		// Set source address, must be device
		dmalp[MCFDMA_SAR] = dma_device_address[dmanr];
	}

#ifdef DEBUG_DMA
	printk("%s(%d): dmanr=%d DCR[%x]=%x SAR[%x]=%08x DAR[%x]=%08x\n",
	       __FILE__, __LINE__, dmanr, (int) &dmawp[MCFDMA_DCR], dmawp[MCFDMA_DCR],
	       (int) &dmalp[MCFDMA_SAR], dmalp[MCFDMA_SAR],
	       (int) &dmalp[MCFDMA_DAR], dmalp[MCFDMA_DAR]);
#endif
}
/*
* Specific for Coldfire - sets device address.
* Should be called after the mode set call, and before set DMA address.
*/
static __inline__ void set_dma_device_addr(unsigned int dmanr, unsigned int a)
{
#ifdef DMA_DEBUG
	printk("set_dma_device_addr(dmanr=%d,a=%x)\n", dmanr, a);
#endif
	dma_device_address[dmanr] = a;
}
/*
* NOTE 2: "count" represents _bytes_.
*/
static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
{
	volatile unsigned short *dmawp;

#ifdef DMA_DEBUG
	printk("set_dma_count(dmanr=%d,count=%d)\n", dmanr, count);
#endif

	dmawp = (unsigned short *) dma_base_addr[dmanr];
	dmawp[MCFDMA_BCR] = (unsigned short)count;
}
/*
* Get DMA residue count. After a DMA transfer, this
* should return zero. Reading this while a DMA transfer is
* still in progress will return unpredictable results.
* Otherwise, it returns the number of _bytes_ left to transfer.
*/
static __inline__ int get_dma_residue(unsigned int dmanr)
{
	volatile unsigned short *dmawp;
	unsigned short count;

#ifdef DMA_DEBUG
	printk("get_dma_residue(dmanr=%d)\n", dmanr);
#endif

	dmawp = (unsigned short *) dma_base_addr[dmanr];
	count = dmawp[MCFDMA_BCR];
	return((int) count);
}
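
/*
 * Illustrative sketch (not part of the original header): a minimal
 * driver-side call sequence for a dual-address transfer using the
 * helpers above, in the order the comments above describe (mode,
 * device address, memory address, count, enable).  The channel number,
 * device register address and buffer are hypothetical, and the block
 * is guarded out so it is never compiled.
 */
#if 0
static void example_dual_address_read(unsigned int channel,
				      unsigned int device_reg,
				      void *buf, unsigned int len)
{
	set_dma_mode(channel, DMA_MODE_READ_WORD);	/* I/O to memory, 16 bits */
	set_dma_device_addr(channel, device_reg);	/* peripheral data register */
	set_dma_addr(channel, (unsigned int) buf);	/* memory buffer */
	set_dma_count(channel, len);			/* length in bytes */
	enable_dma(channel);				/* respond to external requests */
}
#endif
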
#else /* CONFIG_M5272 is defined */
/*
* The MCF5272 DMA controller is very different than the controller defined above
* in terms of register mapping. For instance, with the exception of the 16-bit
* interrupt register (IRQ#85, for reference), all of the registers are 32-bit.
*
* The big difference, however, is the lack of device-requested DMA. All modes
* are dual address transfer, and there is no 'device' setup or direction bit.
* You can DMA between a device and memory, between memory and memory, or even between
* two devices directly, with any combination of incrementing and non-incrementing
* addresses you choose. This puts a crimp in distinguishing between the 'device
* address' set up by set_dma_device_addr.
*
* Therefore, there are two options. One is to use set_dma_addr and set_dma_device_addr,
* which will act exactly as above in -- it will look to see if the source is set to
* autoincrement, and if so it will make the source use the set_dma_addr value and the
* destination the set_dma_device_addr value. Otherwise the source will be set to the
* set_dma_device_addr value and the destination will get the set_dma_addr value.
*
* The other is to use the provided set_dma_src_addr and set_dma_dest_addr functions
* and make it explicit. Depending on what you're doing, one of these two should work
* for you, but don't mix them in the same transfer setup.
*/
/* enable/disable a specific DMA channel */
static __inline__ void enable_dma(unsigned int dmanr)
{
	volatile unsigned int *dmalp;

#ifdef DMA_DEBUG
	printk("enable_dma(dmanr=%d)\n", dmanr);
#endif

	dmalp = (unsigned int *) dma_base_addr[dmanr];
	dmalp[MCFDMA_DMR] |= MCFDMA_DMR_EN;
}

static __inline__ void disable_dma(unsigned int dmanr)
{
	volatile unsigned int *dmalp;

#ifdef DMA_DEBUG
	printk("disable_dma(dmanr=%d)\n", dmanr);
#endif

	dmalp = (unsigned int *) dma_base_addr[dmanr];

	/* Turn off external requests, and stop any DMA in progress */
	dmalp[MCFDMA_DMR] &= ~MCFDMA_DMR_EN;
	dmalp[MCFDMA_DMR] |= MCFDMA_DMR_RESET;
}
/*
* Clear the 'DMA Pointer Flip Flop'.
* Write 0 for LSB/MSB, 1 for MSB/LSB access.
* Use this once to initialize the FF to a known state.
* After that, keep track of it. :-)
* --- In order to do that, the DMA routines below should ---
* --- only be used while interrupts are disabled! ---
*
* This is a NOP for ColdFire. Provide a stub for compatibility.
*/
static __inline__ void clear_dma_ff(unsigned int dmanr)
{
}
/* set mode (above) for a specific DMA channel */
static __inline__ void set_dma_mode(unsigned int dmanr, char mode)
{
	volatile unsigned int *dmalp;
	volatile unsigned short *dmawp;

#ifdef DMA_DEBUG
	printk("set_dma_mode(dmanr=%d,mode=%d)\n", dmanr, mode);
#endif

	dmalp = (unsigned int *) dma_base_addr[dmanr];
	dmawp = (unsigned short *) dma_base_addr[dmanr];

	// Clear config errors
	dmalp[MCFDMA_DMR] |= MCFDMA_DMR_RESET;

	// Set command register
	dmalp[MCFDMA_DMR] =
		MCFDMA_DMR_RQM_DUAL |	// Mandatory Request Mode setting
		MCFDMA_DMR_DSTT_SD |	// Set up addressing types; set to supervisor-data.
		MCFDMA_DMR_SRCT_SD |	// Set up addressing types; set to supervisor-data.
		// source static-address-mode
		((mode & DMA_MODE_SRC_SA_BIT) ? MCFDMA_DMR_SRCM_SA : MCFDMA_DMR_SRCM_IA) |
		// dest static-address-mode
		((mode & DMA_MODE_DES_SA_BIT) ? MCFDMA_DMR_DSTM_SA : MCFDMA_DMR_DSTM_IA) |
		// burst, 32 bit, 16 bit or 8 bit transfers are separately configurable on the MCF5272
		(((mode & DMA_MODE_SSIZE_MASK) >> DMA_MODE_SSIZE_OFF) << MCFDMA_DMR_DSTS_OFF) |
		(((mode & DMA_MODE_SSIZE_MASK) >> DMA_MODE_SSIZE_OFF) << MCFDMA_DMR_SRCS_OFF);

	dmawp[MCFDMA_DIR] |= MCFDMA_DIR_ASCEN;	/* Enable completion interrupts */

#ifdef DEBUG_DMA
	printk("%s(%d): dmanr=%d DMR[%x]=%x DIR[%x]=%x\n", __FILE__, __LINE__,
	       dmanr, (int) &dmalp[MCFDMA_DMR], dmalp[MCFDMA_DMR],
	       (int) &dmawp[MCFDMA_DIR], dmawp[MCFDMA_DIR]);
#endif
}
/* Set transfer address for specific DMA channel */
static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a)
{
	volatile unsigned int *dmalp;

#ifdef DMA_DEBUG
	printk("set_dma_addr(dmanr=%d,a=%x)\n", dmanr, a);
#endif

	dmalp = (unsigned int *) dma_base_addr[dmanr];

	// Determine which address registers are used for memory/device accesses
	if (dmalp[MCFDMA_DMR] & MCFDMA_DMR_SRCM) {
		// Source incrementing, must be memory
		dmalp[MCFDMA_DSAR] = a;
		// Set dest address, must be device
		dmalp[MCFDMA_DDAR] = dma_device_address[dmanr];
	} else {
		// Destination incrementing, must be memory
		dmalp[MCFDMA_DDAR] = a;
		// Set source address, must be device
		dmalp[MCFDMA_DSAR] = dma_device_address[dmanr];
	}

#ifdef DEBUG_DMA
	printk("%s(%d): dmanr=%d DMR[%x]=%x SAR[%x]=%08x DAR[%x]=%08x\n",
	       __FILE__, __LINE__, dmanr, (int) &dmalp[MCFDMA_DMR], dmalp[MCFDMA_DMR],
	       (int) &dmalp[MCFDMA_DSAR], dmalp[MCFDMA_DSAR],
	       (int) &dmalp[MCFDMA_DDAR], dmalp[MCFDMA_DDAR]);
#endif
}
/*
* Specific for Coldfire - sets device address.
* Should be called after the mode set call, and before set DMA address.
*/
static __inline__ void set_dma_device_addr(unsigned int dmanr, unsigned int a)
{
#ifdef DMA_DEBUG
	printk("set_dma_device_addr(dmanr=%d,a=%x)\n", dmanr, a);
#endif
	dma_device_address[dmanr] = a;
}
/*
* NOTE 2: "count" represents _bytes_.
*
* NOTE 3: While a 32-bit register, "count" is only a maximum 24-bit value.
*/
static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
{
	volatile unsigned int *dmalp;

#ifdef DMA_DEBUG
	printk("set_dma_count(dmanr=%d,count=%d)\n", dmanr, count);
#endif

	dmalp = (unsigned int *) dma_base_addr[dmanr];
	dmalp[MCFDMA_DBCR] = count;
}
/*
* Get DMA residue count. After a DMA transfer, this
* should return zero. Reading this while a DMA transfer is
* still in progress will return unpredictable results.
* Otherwise, it returns the number of _bytes_ left to transfer.
*/
static __inline__ int get_dma_residue(unsigned int dmanr)
{
	volatile unsigned int *dmalp;
	unsigned int count;

#ifdef DMA_DEBUG
	printk("get_dma_residue(dmanr=%d)\n", dmanr);
#endif

	dmalp = (unsigned int *) dma_base_addr[dmanr];
	count = dmalp[MCFDMA_DBCR];
	return(count);
}
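
/*
 * Illustrative sketch (not part of the original header): the first of
 * the two MCF5272 options described in the comment above, letting
 * set_dma_addr() and set_dma_device_addr() pick source and destination
 * from the static-address bit in the mode.  The channel number, device
 * register address and buffer are hypothetical, and the block is
 * guarded out so it is never compiled.
 */
#if 0
static void example_mcf5272_write(unsigned int channel,
				  unsigned int device_reg,
				  void *buf, unsigned int len)
{
	/* 16-bit transfers, destination held static (the device),
	   source incrementing (memory). */
	set_dma_mode(channel, (DMA_MODE_SIZE_WORD << DMA_MODE_DSIZE_OFF) |
			      (DMA_MODE_SIZE_WORD << DMA_MODE_SSIZE_OFF) |
			      DMA_MODE_DES_SA_BIT);
	set_dma_device_addr(channel, device_reg);
	set_dma_addr(channel, (unsigned int) buf);
	set_dma_count(channel, len);		/* at most a 24-bit value */
	enable_dma(channel);
}
#endif
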
#endif /* !defined(CONFIG_M5272) */
#endif /* CONFIG_COLDFIRE */
#define MAX_DMA_CHANNELS 8
/* Don't define MAX_DMA_ADDRESS; it's useless on the m68k/coldfire and any
occurrence should be flagged as an error. */
/* under 2.4 it is actually needed by the new bootmem allocator */
#define MAX_DMA_ADDRESS PAGE_OFFSET
/* These are in kernel/dma.c: */
extern int request_dma(unsigned int dmanr, const char *device_id); /* reserve a DMA channel */
extern void free_dma(unsigned int dmanr); /* release it again */
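
/*
 * Illustrative sketch (not part of the original header): reserving and
 * releasing a channel around a transfer.  The channel number and the
 * "mydev" identifier are hypothetical; the block is guarded out so it
 * is never compiled.
 */
#if 0
static int example_claim_channel(void)
{
	if (request_dma(1, "mydev") != 0)	/* reserve channel 1 */
		return -1;
	/* ... program and run the transfer, then ... */
	free_dma(1);				/* release it again */
	return 0;
}
#endif
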
#endif /* _M68K_DMA_H */