Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6

* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6:
  [IA64] Clean up linker script using standard macros.
  [IA64] Use standard macros for page-aligned data.
  [IA64] Use .ref.text, not .text.init for start_ap.
  [IA64] sgi-xp: fix printk format warnings
  [IA64] ioc4_serial: fix printk format warnings
  [IA64] mbcs: fix printk format warnings
  [IA64] pci_br, fix infinite loop in find_free_ate()
  [IA64] kdump: Short path to freeze CPUs
  [IA64] kdump: Try INIT regardless of
  [IA64] kdump: Mask INIT first in panic-kdump path
  [IA64] kdump: Don't return APs to SAL from kdump
  [IA64] kexec: Unregister MCA handler before kexec
  [IA64] kexec: Make INIT safe while transition to
  [IA64] kdump: Mask MCA/INIT on frozen cpus

Fix up conflict in arch/ia64/kernel/vmlinux.lds.S as per Tony's suggestion.

commit fa877c71e2
12 changed files with 179 additions and 153 deletions
arch/ia64/include/asm/mca.h
@@ -145,12 +145,14 @@ extern void ia64_mca_ucmc_handler(struct pt_regs *, struct ia64_sal_os_state *);
extern void ia64_init_handler(struct pt_regs *,
                              struct switch_stack *,
                              struct ia64_sal_os_state *);
+extern void ia64_os_init_on_kdump(void);
extern void ia64_monarch_init_handler(void);
extern void ia64_slave_init_handler(void);
extern void ia64_mca_cmc_vector_setup(void);
extern int  ia64_reg_MCA_extension(int (*fn)(void *, struct ia64_sal_os_state *));
extern void ia64_unreg_MCA_extension(void);
extern unsigned long ia64_get_rnat(unsigned long *);
+extern void ia64_set_psr_mc(void);
extern void ia64_mca_printk(const char * fmt, ...)
         __attribute__ ((format (printf, 1, 2)));
arch/ia64/kernel/crash.c
@@ -23,6 +23,7 @@
int kdump_status[NR_CPUS];
static atomic_t kdump_cpu_frozen;
atomic_t kdump_in_progress;
+static int kdump_freeze_monarch;
static int kdump_on_init = 1;
static int kdump_on_fatal_mca = 1;
@@ -108,10 +109,38 @@ machine_crash_shutdown(struct pt_regs *pt)
         */
        kexec_disable_iosapic();
#ifdef CONFIG_SMP
+       /*
+        * If kdump_on_init is set and an INIT is asserted here, kdump will
+        * be started again via INIT monarch.
+        */
+       local_irq_disable();
+       ia64_set_psr_mc();      /* mask MCA/INIT */
+       if (atomic_inc_return(&kdump_in_progress) != 1)
+               unw_init_running(kdump_cpu_freeze, NULL);
+
+       /*
+        * Now this cpu is ready for kdump.
+        * Stop all others by IPI or INIT. They could receive INIT from
+        * outside and might be INIT monarch, but only thing they have to
+        * do is falling into kdump_cpu_freeze().
+        *
+        * If an INIT is asserted here:
+        * - All receivers might be slaves, since some of cpus could already
+        *   be frozen and INIT might be masked on monarch. In this case,
+        *   all slaves will be frozen soon since kdump_in_progress will let
+        *   them into DIE_INIT_SLAVE_LEAVE.
+        * - One might be a monarch, but INIT rendezvous will fail since
+        *   at least this cpu already have INIT masked so it never join
+        *   to the rendezvous. In this case, all slaves and monarch will
+        *   be frozen soon with no wait since the INIT rendezvous is skipped
+        *   by kdump_in_progress.
+        */
        kdump_smp_send_stop();
        /* not all cpu response to IPI, send INIT to freeze them */
-       if (kdump_wait_cpu_freeze() && kdump_on_init) {
+       if (kdump_wait_cpu_freeze()) {
                kdump_smp_send_init();
+               /* wait again, don't go ahead if possible */
+               kdump_wait_cpu_freeze();
        }
#endif
}
@@ -129,17 +158,17 @@ void
kdump_cpu_freeze(struct unw_frame_info *info, void *arg)
{
        int cpuid;
+
        local_irq_disable();
        cpuid = smp_processor_id();
        crash_save_this_cpu();
        current->thread.ksp = (__u64)info->sw - 16;
+
+       ia64_set_psr_mc();      /* mask MCA/INIT and stop reentrance */
+
        atomic_inc(&kdump_cpu_frozen);
        kdump_status[cpuid] = 1;
        mb();
-#ifdef CONFIG_HOTPLUG_CPU
-       if (cpuid != 0)
-               ia64_jump_to_sal(&sal_boot_rendez_state[cpuid]);
-#endif
        for (;;)
                cpu_relax();
}
@@ -150,6 +179,20 @@ kdump_init_notifier(struct notifier_block *self, unsigned long val, void *data)
        struct ia64_mca_notify_die *nd;
        struct die_args *args = data;

+       if (atomic_read(&kdump_in_progress)) {
+               switch (val) {
+               case DIE_INIT_MONARCH_LEAVE:
+                       if (!kdump_freeze_monarch)
+                               break;
+                       /* fall through */
+               case DIE_INIT_SLAVE_LEAVE:
+               case DIE_INIT_MONARCH_ENTER:
+               case DIE_MCA_RENDZVOUS_LEAVE:
+                       unw_init_running(kdump_cpu_freeze, NULL);
+                       break;
+               }
+       }
+
        if (!kdump_on_init && !kdump_on_fatal_mca)
                return NOTIFY_DONE;

@@ -162,43 +205,31 @@ kdump_init_notifier(struct notifier_block *self, unsigned long val, void *data)
        }

        if (val != DIE_INIT_MONARCH_LEAVE &&
-           val != DIE_INIT_SLAVE_LEAVE &&
            val != DIE_INIT_MONARCH_PROCESS &&
-           val != DIE_MCA_RENDZVOUS_LEAVE &&
            val != DIE_MCA_MONARCH_LEAVE)
                return NOTIFY_DONE;

        nd = (struct ia64_mca_notify_die *)args->err;
-       /* Reason code 1 means machine check rendezvous*/
-       if ((val == DIE_INIT_MONARCH_LEAVE || val == DIE_INIT_SLAVE_LEAVE
-           || val == DIE_INIT_MONARCH_PROCESS) && nd->sos->rv_rc == 1)
-               return NOTIFY_DONE;

        switch (val) {
        case DIE_INIT_MONARCH_PROCESS:
-               if (kdump_on_init) {
-                       atomic_set(&kdump_in_progress, 1);
-                       *(nd->monarch_cpu) = -1;
+               /* Reason code 1 means machine check rendezvous*/
+               if (kdump_on_init && (nd->sos->rv_rc != 1)) {
+                       if (atomic_inc_return(&kdump_in_progress) != 1)
+                               kdump_freeze_monarch = 1;
                }
                break;
        case DIE_INIT_MONARCH_LEAVE:
-               if (kdump_on_init)
+               /* Reason code 1 means machine check rendezvous*/
+               if (kdump_on_init && (nd->sos->rv_rc != 1))
                        machine_kdump_on_init();
                break;
-       case DIE_INIT_SLAVE_LEAVE:
-               if (atomic_read(&kdump_in_progress))
-                       unw_init_running(kdump_cpu_freeze, NULL);
-               break;
-       case DIE_MCA_RENDZVOUS_LEAVE:
-               if (atomic_read(&kdump_in_progress))
-                       unw_init_running(kdump_cpu_freeze, NULL);
-               break;
        case DIE_MCA_MONARCH_LEAVE:
                /* *(nd->data) indicate if MCA is recoverable */
                if (kdump_on_fatal_mca && !(*(nd->data))) {
-                       atomic_set(&kdump_in_progress, 1);
-                       *(nd->monarch_cpu) = -1;
-                       machine_kdump_on_init();
+                       if (atomic_inc_return(&kdump_in_progress) == 1)
+                               machine_kdump_on_init();
+                       /* We got fatal MCA while kdump!? No way!! */
                }
                break;
        }
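The kdump path above leans on atomic_inc_return(&kdump_in_progress) != 1 so that only the first CPU to enter the crash path drives the dump, while any later arrival simply freezes itself. Below is a minimal userspace sketch of that "first caller wins" pattern, using C11 atomics rather than the kernel's atomic_t; crash_shutdown() and freeze_self() are illustrative stand-ins, not kernel APIs.

#include <stdatomic.h>
#include <stdio.h>

/* Illustrative stand-ins for kdump_in_progress and the freeze path. */
static atomic_int kdump_in_progress;

static void freeze_self(int cpu)
{
        /* In the kernel this would be kdump_cpu_freeze(); here we just report. */
        printf("cpu %d: dump already in progress, freezing\n", cpu);
}

static void crash_shutdown(int cpu)
{
        /* atomic_fetch_add() returns the old value, so the first caller sees 0
         * (the kernel's atomic_inc_return() would return 1 at the same point). */
        if (atomic_fetch_add(&kdump_in_progress, 1) != 0) {
                freeze_self(cpu);
                return;
        }
        printf("cpu %d: first into the crash path, driving the dump\n", cpu);
}

int main(void)
{
        crash_shutdown(0);      /* wins the race, drives the dump */
        crash_shutdown(1);      /* sees the counter already raised, freezes */
        return 0;
}

The kdump_init_notifier() hunk uses the same counter to tell the CPU that started the dump apart from CPUs that only need to freeze.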
arch/ia64/kernel/head.S
@@ -167,7 +167,7 @@ RestRR: \
        mov _tmp2=((ia64_rid(IA64_REGION_ID_KERNEL, (num<<61)) << 8) | (pgsize << 2) | vhpt);; \
        mov rr[_tmp1]=_tmp2

-       .section __special_page_section,"ax"
+       __PAGE_ALIGNED_DATA

        .global empty_zero_page
empty_zero_page:
@@ -181,7 +181,7 @@ swapper_pg_dir:
halt_msg:
        stringz "Halting kernel\n"

-       .section .text.head,"ax"
+       __REF

        .global start_ap

@@ -1242,7 +1242,7 @@ GLOBAL_ENTRY(ia64_jump_to_sal)
        movl r16=SAL_PSR_BITS_TO_SET;;
        mov cr.ipsr=r16
        mov cr.ifs=r0;;
-       rfi;;
+       rfi;;                           // note: this unmask MCA/INIT (psr.mc)
1:
        /*
         * Invalidate all TLB data/inst
arch/ia64/kernel/machine_kexec.c
@@ -24,6 +24,8 @@
#include <asm/delay.h>
#include <asm/meminit.h>
#include <asm/processor.h>
+#include <asm/sal.h>
+#include <asm/mca.h>

typedef NORET_TYPE void (*relocate_new_kernel_t)(
                                        unsigned long indirection_page,
@@ -85,13 +87,26 @@ static void ia64_machine_kexec(struct unw_frame_info *info, void *arg)
        void *pal_addr = efi_get_pal_addr();
        unsigned long code_addr = (unsigned long)page_address(image->control_code_page);
        int ii;
+       u64 fp, gp;
+       ia64_fptr_t *init_handler = (ia64_fptr_t *)ia64_os_init_on_kdump;

        BUG_ON(!image);
        if (image->type == KEXEC_TYPE_CRASH) {
                crash_save_this_cpu();
                current->thread.ksp = (__u64)info->sw - 16;

+               /* Register noop init handler */
+               fp = ia64_tpa(init_handler->fp);
+               gp = ia64_tpa(ia64_getreg(_IA64_REG_GP));
+               ia64_sal_set_vectors(SAL_VECTOR_OS_INIT, fp, gp, 0, fp, gp, 0);
+       } else {
+               /* Unregister init handlers of current kernel */
+               ia64_sal_set_vectors(SAL_VECTOR_OS_INIT, 0, 0, 0, 0, 0, 0);
        }

+       /* Unregister mca handler - No more recovery on current kernel */
+       ia64_sal_set_vectors(SAL_VECTOR_OS_MCA, 0, 0, 0, 0, 0, 0);

        /* Interrupts aren't acceptable while we reboot */
        local_irq_disable();
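Before jumping into the new kernel, the hunk above registers a no-op INIT handler with SAL (or unregisters the old one) and unregisters the MCA handler, so a late INIT or MCA cannot call back into handlers of the kernel that is being torn down. As a loose userspace analogy only, with signals standing in for SAL vectors and exec for the jump to the new kernel, the "neutralize stale handlers before handing off" idea looks roughly like this:

#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static void old_handler(int sig)
{
        (void)sig;      /* handler belonging to the "old" program state */
}

int main(void)
{
        signal(SIGINT, old_handler);

        /* About to hand control over: make sure a late SIGINT can no longer
         * run old_handler().  SIG_IGN plays the role of the no-op handler
         * that the kexec path registers with SAL. */
        signal(SIGINT, SIG_IGN);

        puts("stale handlers neutralized, handing off");
        execlp("true", "true", (char *)NULL);   /* stand-in for the new kernel */
        perror("execlp");
        return 1;
}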
arch/ia64/kernel/mca.c
@@ -1682,14 +1682,25 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,

        if (!sos->monarch) {
                ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_INIT;
+
+#ifdef CONFIG_KEXEC
+               while (monarch_cpu == -1 && !atomic_read(&kdump_in_progress))
+                       udelay(1000);
+#else
                while (monarch_cpu == -1)
-                      cpu_relax();     /* spin until monarch enters */
+                       cpu_relax();    /* spin until monarch enters */
+#endif

                NOTIFY_INIT(DIE_INIT_SLAVE_ENTER, regs, (long)&nd, 1);
                NOTIFY_INIT(DIE_INIT_SLAVE_PROCESS, regs, (long)&nd, 1);

+#ifdef CONFIG_KEXEC
+               while (monarch_cpu != -1 && !atomic_read(&kdump_in_progress))
+                       udelay(1000);
+#else
                while (monarch_cpu != -1)
-                      cpu_relax();     /* spin until monarch leaves */
+                       cpu_relax();    /* spin until monarch leaves */
+#endif

                NOTIFY_INIT(DIE_INIT_SLAVE_LEAVE, regs, (long)&nd, 1);

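The CONFIG_KEXEC variant above makes an INIT slave poll with udelay() and also give up waiting for the monarch once kdump_in_progress is set, instead of spinning on cpu_relax() forever against a monarch that may already be frozen. A rough userspace sketch of a wait loop with that extra bail-out condition follows; C11 atomics and nanosleep() stand in for the kernel primitives, and the names mirror the kernel variables but are local to the example.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static atomic_int monarch_cpu = -1;     /* -1: no monarch has entered yet */
static atomic_bool kdump_in_progress;

static void slave_wait_for_monarch(void)
{
        const struct timespec one_ms = { .tv_sec = 0, .tv_nsec = 1000 * 1000 };

        /* Poll until a monarch shows up, but stop waiting once a crash dump
         * has started -- the monarch may already be frozen and never arrive. */
        while (atomic_load(&monarch_cpu) == -1 &&
               !atomic_load(&kdump_in_progress))
                nanosleep(&one_ms, NULL);
}

int main(void)
{
        atomic_store(&kdump_in_progress, true); /* simulate kdump starting */
        slave_wait_for_monarch();               /* returns instead of hanging */
        puts("slave: gave up waiting, falling into the freeze path");
        return 0;
}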
arch/ia64/kernel/mca_asm.S
@@ -40,6 +40,7 @@

        .global ia64_do_tlb_purge
        .global ia64_os_mca_dispatch
+       .global ia64_os_init_on_kdump
        .global ia64_os_init_dispatch_monarch
        .global ia64_os_init_dispatch_slave

@@ -298,6 +299,25 @@ END(ia64_os_mca_virtual_begin)

//StartMain////////////////////////////////////////////////////////////////////

+//
+// NOP init handler for kdump. In panic situation, we may receive INIT
+// while kernel transition. Since we initialize registers on leave from
+// current kernel, no longer monarch/slave handlers of current kernel in
+// virtual mode are called safely.
+// We can unregister these init handlers from SAL, however then the INIT
+// will result in warmboot by SAL and we cannot retrieve the crashdump.
+// Therefore register this NOP function to SAL, to prevent entering virtual
+// mode and resulting warmboot by SAL.
+//
+ia64_os_init_on_kdump:
+       mov             r8=r0           // IA64_INIT_RESUME
+       mov             r9=r10          // SAL_GP
+       mov             r22=r17         // *minstate
+       ;;
+       mov             r10=r0          // return to same context
+       mov             b0=r12          // SAL_CHECK return address
+       br              b0
+
//
// SAL to OS entry point for INIT on all processors. This has been defined for
// registration purposes with SAL as a part of ia64_mca_init. Monarch and
@@ -1073,3 +1093,30 @@ GLOBAL_ENTRY(ia64_get_rnat)
        mov ar.rsc=3
        br.ret.sptk.many rp
END(ia64_get_rnat)
+
+
+// void ia64_set_psr_mc(void)
+//
+// Set psr.mc bit to mask MCA/INIT.
+GLOBAL_ENTRY(ia64_set_psr_mc)
+       rsm psr.i | psr.ic              // disable interrupts
+       ;;
+       srlz.d
+       ;;
+       mov r14 = psr                   // get psr{36:35,31:0}
+       movl r15 = 1f
+       ;;
+       dep r14 = -1, r14, PSR_MC, 1    // set psr.mc
+       ;;
+       dep r14 = -1, r14, PSR_IC, 1    // set psr.ic
+       ;;
+       dep r14 = -1, r14, PSR_BN, 1    // keep bank1 in use
+       ;;
+       mov cr.ipsr = r14
+       mov cr.ifs = r0
+       mov cr.iip = r15
+       ;;
+       rfi
+1:
+       br.ret.sptk.many rp
+END(ia64_set_psr_mc)
arch/ia64/kernel/relocate_kernel.S
@@ -52,7 +52,7 @@ GLOBAL_ENTRY(relocate_new_kernel)
        srlz.i
        ;;
        mov     ar.rnat=r18
-       rfi
+       rfi                             // note: this unmask MCA/INIT (psr.mc)
        ;;
1:
        //physical mode code begin
arch/ia64/kernel/vmlinux.lds.S
@@ -51,8 +51,6 @@ SECTIONS
                KPROBES_TEXT
                *(.gnu.linkonce.t*)
        }
-  .text.head : AT(ADDR(.text.head) - LOAD_OFFSET)
-       { *(.text.head) }
  .text2 : AT(ADDR(.text2) - LOAD_OFFSET)
        { *(.text2) }
#ifdef CONFIG_SMP
@@ -66,14 +64,7 @@ SECTIONS
  NOTES :code :note             /* put .notes in text and mark in PT_NOTE */
  code_continues : {} :code     /* switch back to regular program... */

-  /* Exception table */
-  . = ALIGN(16);
-  __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET)
-       {
-         __start___ex_table = .;
-         *(__ex_table)
-         __stop___ex_table = .;
-       }
+  EXCEPTION_TABLE(16)

  /* MCA table */
  . = ALIGN(16);
@@ -115,38 +106,9 @@ SECTIONS

  . = ALIGN(PAGE_SIZE);
  __init_begin = .;
-  .init.text : AT(ADDR(.init.text) - LOAD_OFFSET)
-       {
-         _sinittext = .;
-         INIT_TEXT
-         _einittext = .;
-       }
-
-  .init.data : AT(ADDR(.init.data) - LOAD_OFFSET)
-       { INIT_DATA }
-
-#ifdef CONFIG_BLK_DEV_INITRD
-  .init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET)
-       {
-         __initramfs_start = .;
-         *(.init.ramfs)
-         __initramfs_end = .;
-       }
-#endif
-
-  . = ALIGN(16);
-  .init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET)
-       {
-         __setup_start = .;
-         *(.init.setup)
-         __setup_end = .;
-       }
-  .initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET)
-       {
-         __initcall_start = .;
-         INITCALLS
-         __initcall_end = .;
-       }
+  INIT_TEXT_SECTION(PAGE_SIZE)
+  INIT_DATA_SECTION(16)

  .data.patch.vtop : AT(ADDR(.data.patch.vtop) - LOAD_OFFSET)
        {
@@ -204,24 +166,13 @@ SECTIONS
        }
#endif

-  . = ALIGN(8);
-  __con_initcall_start = .;
-  .con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET)
-       { *(.con_initcall.init) }
-  __con_initcall_end = .;
-  __security_initcall_start = .;
-  .security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET)
-       { *(.security_initcall.init) }
-  __security_initcall_end = .;
  . = ALIGN(PAGE_SIZE);
  __init_end = .;

-  /* The initial task and kernel stack */
-  .data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET)
-       { *(.data.init_task) }
-
  .data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET)
-       { *(__special_page_section)
+       {
+       PAGE_ALIGNED_DATA(PAGE_SIZE)
+       . = ALIGN(PAGE_SIZE);
          __start_gate_section = .;
          *(.data.gate)
          __stop_gate_section = .;
@@ -236,12 +187,6 @@ SECTIONS
   * kernel data
   */

-  .data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET)
-       { *(.data.read_mostly) }
-
-  .data.cacheline_aligned : AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET)
-       { *(.data.cacheline_aligned) }
-
  /* Per-cpu data: */
  . = ALIGN(PERCPU_PAGE_SIZE);
  PERCPU_VADDR(PERCPU_ADDR, :percpu)
@@ -258,6 +203,9 @@ SECTIONS
                __cpu0_per_cpu = .;
  . = . + PERCPU_PAGE_SIZE;     /* cpu0 per-cpu space */
#endif
+               INIT_TASK_DATA(PAGE_SIZE)
+               CACHELINE_ALIGNED_DATA(SMP_CACHE_BYTES)
+               READ_MOSTLY_DATA(SMP_CACHE_BYTES)
                DATA_DATA
                *(.data1)
                *(.gnu.linkonce.d*)
@@ -274,48 +222,15 @@ SECTIONS
  .sdata : AT(ADDR(.sdata) - LOAD_OFFSET)
        { *(.sdata) *(.sdata1) *(.srdata) }
  _edata = .;
-  __bss_start = .;
-  .sbss : AT(ADDR(.sbss) - LOAD_OFFSET)
-       { *(.sbss) *(.scommon) }
-  .bss : AT(ADDR(.bss) - LOAD_OFFSET)
-       { *(.bss) *(COMMON) }
-  __bss_stop = .;
+
+  BSS_SECTION(0, 0, 0)

  _end = .;

  code : { } :code
-  /* Stabs debugging sections. */
-  .stab 0 : { *(.stab) }
-  .stabstr 0 : { *(.stabstr) }
-  .stab.excl 0 : { *(.stab.excl) }
-  .stab.exclstr 0 : { *(.stab.exclstr) }
-  .stab.index 0 : { *(.stab.index) }
-  .stab.indexstr 0 : { *(.stab.indexstr) }
-  /* DWARF debug sections.
-     Symbols in the DWARF debugging sections are relative to the beginning
-     of the section so we begin them at 0. */
-  /* DWARF 1 */
-  .debug 0 : { *(.debug) }
-  .line 0 : { *(.line) }
-  /* GNU DWARF 1 extensions */
-  .debug_srcinfo 0 : { *(.debug_srcinfo) }
-  .debug_sfnames 0 : { *(.debug_sfnames) }
-  /* DWARF 1.1 and DWARF 2 */
-  .debug_aranges 0 : { *(.debug_aranges) }
-  .debug_pubnames 0 : { *(.debug_pubnames) }
-  /* DWARF 2 */
-  .debug_info 0 : { *(.debug_info) }
-  .debug_abbrev 0 : { *(.debug_abbrev) }
-  .debug_line 0 : { *(.debug_line) }
-  .debug_frame 0 : { *(.debug_frame) }
-  .debug_str 0 : { *(.debug_str) }
-  .debug_loc 0 : { *(.debug_loc) }
-  .debug_macinfo 0 : { *(.debug_macinfo) }
-  /* SGI/MIPS DWARF 2 extensions */
-  .debug_weaknames 0 : { *(.debug_weaknames) }
-  .debug_funcnames 0 : { *(.debug_funcnames) }
-  .debug_typenames 0 : { *(.debug_typenames) }
-  .debug_varnames 0 : { *(.debug_varnames) }
+
+  STABS_DEBUG
+  DWARF_DEBUG

+  /* Default discards */
+  DISCARDS
arch/ia64/sn/pci/pcibr/pcibr_ate.c
@@ -54,6 +54,8 @@ static int find_free_ate(struct ate_resource *ate_resource, int start,
                                        break;
                                }
                        }
+                       if (i >= ate_resource->num_ate)
+                               return -1;
                } else
                        index++;        /* Try next ate */
        }
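The find_free_ate() change adds an explicit upper-bound check so the scan returns -1 once the index runs past num_ate, instead of looping forever. A simplified sketch of the same bounded-scan idea over a plain array (it ignores the real function's handling of multi-entry runs):

#include <stdio.h>

/* Return the index of the first free (zero) slot at or after 'start',
 * or -1 once the scan reaches the end of the table. */
static int find_free_slot(const int *table, int num_entries, int start)
{
        for (int i = start; i < num_entries; i++) {
                if (table[i] == 0)
                        return i;
        }
        return -1;      /* nothing free: report failure instead of spinning */
}

int main(void)
{
        int ate[4] = { 1, 1, 1, 1 };    /* all entries busy */

        printf("free slot: %d\n", find_free_slot(ate, 4, 0));   /* prints -1 */
        ate[2] = 0;
        printf("free slot: %d\n", find_free_slot(ate, 4, 0));   /* prints 2 */
        return 0;
}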
drivers/char/mbcs.c
@@ -15,6 +15,7 @@
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/init.h>
@@ -715,8 +716,8 @@ static ssize_t show_algo(struct device *dev, struct device_attribute *attr, char
         */
        debug0 = *(uint64_t *) soft->debug_addr;

-       return sprintf(buf, "0x%lx 0x%lx\n",
-                      (debug0 >> 32), (debug0 & 0xffffffff));
+       return sprintf(buf, "0x%x 0x%x\n",
+                      upper_32_bits(debug0), lower_32_bits(debug0));
}

static ssize_t store_algo(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
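The show_algo() fix prints the two 32-bit halves with %x via upper_32_bits()/lower_32_bits(), so the format string no longer depends on how wide long is on a given architecture. A small userspace illustration of both portable options for a 64-bit value, with plain shifts and masks standing in for the kernel helpers:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
        uint64_t debug0 = 0x1234567890abcdefULL;

        /* Option 1: print the two 32-bit halves with %x, as the mbcs fix
         * does with upper_32_bits()/lower_32_bits(). */
        printf("0x%x 0x%x\n",
               (unsigned int)(debug0 >> 32),
               (unsigned int)(debug0 & 0xffffffffu));

        /* Option 2: print the whole value with a width-correct conversion,
         * e.g. PRIx64 in userspace or %llx with an explicit cast. */
        printf("0x%" PRIx64 "\n", debug0);
        printf("0x%llx\n", (unsigned long long)debug0);
        return 0;
}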
drivers/misc/sgi-xp/xpc_sn2.c
@@ -279,7 +279,7 @@ xpc_check_for_sent_chctl_flags_sn2(struct xpc_partition *part)
        spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

        dev_dbg(xpc_chan, "received notify IRQ from partid=%d, chctl.all_flags="
-               "0x%lx\n", XPC_PARTID(part), chctl.all_flags);
+               "0x%llx\n", XPC_PARTID(part), chctl.all_flags);

        xpc_wakeup_channel_mgr(part);
}
@@ -615,7 +615,8 @@ xpc_get_partition_rsvd_page_pa_sn2(void *buf, u64 *cookie, unsigned long *rp_pa,
        s64 status;
        enum xp_retval ret;

-       status = sn_partition_reserved_page_pa((u64)buf, cookie, rp_pa, len);
+       status = sn_partition_reserved_page_pa((u64)buf, cookie,
+                       (u64 *)rp_pa, (u64 *)len);
        if (status == SALRET_OK)
                ret = xpSuccess;
        else if (status == SALRET_MORE_PASSES)
@@ -777,8 +778,8 @@ xpc_get_remote_heartbeat_sn2(struct xpc_partition *part)
        if (ret != xpSuccess)
                return ret;

-       dev_dbg(xpc_part, "partid=%d, heartbeat=%ld, last_heartbeat=%ld, "
-               "heartbeat_offline=%ld, HB_mask[0]=0x%lx\n", XPC_PARTID(part),
+       dev_dbg(xpc_part, "partid=%d, heartbeat=%lld, last_heartbeat=%lld, "
+               "heartbeat_offline=%lld, HB_mask[0]=0x%lx\n", XPC_PARTID(part),
                remote_vars->heartbeat, part->last_heartbeat,
                remote_vars->heartbeat_offline,
                remote_vars->heartbeating_to_mask[0]);
@@ -940,7 +941,7 @@ xpc_update_partition_info_sn2(struct xpc_partition *part, u8 remote_rp_version,
                part_sn2->remote_vars_pa);

        part->last_heartbeat = remote_vars->heartbeat - 1;
-       dev_dbg(xpc_part, "  last_heartbeat = 0x%016lx\n",
+       dev_dbg(xpc_part, "  last_heartbeat = 0x%016llx\n",
                part->last_heartbeat);

        part_sn2->remote_vars_part_pa = remote_vars->vars_part_pa;
@@ -1029,7 +1030,8 @@ xpc_identify_activate_IRQ_req_sn2(int nasid)
        part->activate_IRQ_rcvd++;

        dev_dbg(xpc_part, "partid for nasid %d is %d; IRQs = %d; HB = "
-               "%ld:0x%lx\n", (int)nasid, (int)partid, part->activate_IRQ_rcvd,
+               "%lld:0x%lx\n", (int)nasid, (int)partid,
+               part->activate_IRQ_rcvd,
                remote_vars->heartbeat, remote_vars->heartbeating_to_mask[0]);

        if (xpc_partition_disengaged(part) &&
@@ -1129,7 +1131,7 @@ xpc_identify_activate_IRQ_sender_sn2(void)
                do {
                        n_IRQs_detected++;
                        nasid = (l * BITS_PER_LONG + b) * 2;
-                       dev_dbg(xpc_part, "interrupt from nasid %ld\n", nasid);
+                       dev_dbg(xpc_part, "interrupt from nasid %lld\n", nasid);
                        xpc_identify_activate_IRQ_req_sn2(nasid);

                        b = find_next_bit(&nasid_mask_long, BITS_PER_LONG,
@@ -1386,7 +1388,7 @@ xpc_pull_remote_vars_part_sn2(struct xpc_partition *part)

        if (pulled_entry->magic != 0) {
                dev_dbg(xpc_chan, "partition %d's XPC vars_part for "
-                       "partition %d has bad magic value (=0x%lx)\n",
+                       "partition %d has bad magic value (=0x%llx)\n",
                        partid, sn_partition_id, pulled_entry->magic);
                return xpBadMagic;
        }
@@ -1730,14 +1732,14 @@ xpc_notify_senders_sn2(struct xpc_channel *ch, enum xp_retval reason, s64 put)

                if (notify->func != NULL) {
                        dev_dbg(xpc_chan, "notify->func() called, notify=0x%p "
-                               "msg_number=%ld partid=%d channel=%d\n",
+                               "msg_number=%lld partid=%d channel=%d\n",
                                (void *)notify, get, ch->partid, ch->number);

                        notify->func(reason, ch->partid, ch->number,
                                     notify->key);

                        dev_dbg(xpc_chan, "notify->func() returned, notify=0x%p"
-                               " msg_number=%ld partid=%d channel=%d\n",
+                               " msg_number=%lld partid=%d channel=%d\n",
                                (void *)notify, get, ch->partid, ch->number);
                }
        }
@@ -1858,7 +1860,7 @@ xpc_process_msg_chctl_flags_sn2(struct xpc_partition *part, int ch_number)

                ch_sn2->w_remote_GP.get = ch_sn2->remote_GP.get;

-               dev_dbg(xpc_chan, "w_remote_GP.get changed to %ld, partid=%d, "
+               dev_dbg(xpc_chan, "w_remote_GP.get changed to %lld, partid=%d, "
                        "channel=%d\n", ch_sn2->w_remote_GP.get, ch->partid,
                        ch->number);

@@ -1885,7 +1887,7 @@ xpc_process_msg_chctl_flags_sn2(struct xpc_partition *part, int ch_number)
                smp_wmb(); /* ensure flags have been cleared before bte_copy */
                ch_sn2->w_remote_GP.put = ch_sn2->remote_GP.put;

-               dev_dbg(xpc_chan, "w_remote_GP.put changed to %ld, partid=%d, "
+               dev_dbg(xpc_chan, "w_remote_GP.put changed to %lld, partid=%d, "
                        "channel=%d\n", ch_sn2->w_remote_GP.put, ch->partid,
                        ch->number);

@@ -1943,7 +1945,7 @@ xpc_pull_remote_msg_sn2(struct xpc_channel *ch, s64 get)
                if (ret != xpSuccess) {

                        dev_dbg(xpc_chan, "failed to pull %d msgs starting with"
-                               " msg %ld from partition %d, channel=%d, "
+                               " msg %lld from partition %d, channel=%d, "
                                "ret=%d\n", nmsgs, ch_sn2->next_msg_to_pull,
                                ch->partid, ch->number, ret);

@@ -1995,7 +1997,7 @@ xpc_get_deliverable_payload_sn2(struct xpc_channel *ch)
                if (cmpxchg(&ch_sn2->w_local_GP.get, get, get + 1) == get) {
                        /* we got the entry referenced by get */

-                       dev_dbg(xpc_chan, "w_local_GP.get changed to %ld, "
+                       dev_dbg(xpc_chan, "w_local_GP.get changed to %lld, "
                                "partid=%d, channel=%d\n", get + 1,
                                ch->partid, ch->number);

@@ -2062,7 +2064,7 @@ xpc_send_msgs_sn2(struct xpc_channel *ch, s64 initial_put)

                /* we just set the new value of local_GP->put */

-               dev_dbg(xpc_chan, "local_GP->put changed to %ld, partid=%d, "
+               dev_dbg(xpc_chan, "local_GP->put changed to %lld, partid=%d, "
                        "channel=%d\n", put, ch->partid, ch->number);

                send_msgrequest = 1;
@@ -2147,8 +2149,8 @@ xpc_allocate_msg_sn2(struct xpc_channel *ch, u32 flags,
        DBUG_ON(msg->flags != 0);
        msg->number = put;

-       dev_dbg(xpc_chan, "w_local_GP.put changed to %ld; msg=0x%p, "
-               "msg_number=%ld, partid=%d, channel=%d\n", put + 1,
+       dev_dbg(xpc_chan, "w_local_GP.put changed to %lld; msg=0x%p, "
+               "msg_number=%lld, partid=%d, channel=%d\n", put + 1,
                (void *)msg, msg->number, ch->partid, ch->number);

        *address_of_msg = msg;
@@ -2296,7 +2298,7 @@ xpc_acknowledge_msgs_sn2(struct xpc_channel *ch, s64 initial_get, u8 msg_flags)

        /* we just set the new value of local_GP->get */

-       dev_dbg(xpc_chan, "local_GP->get changed to %ld, partid=%d, "
+       dev_dbg(xpc_chan, "local_GP->get changed to %lld, partid=%d, "
                "channel=%d\n", get, ch->partid, ch->number);

        send_msgrequest = (msg_flags & XPC_M_SN2_INTERRUPT);
@@ -2323,7 +2325,7 @@ xpc_received_payload_sn2(struct xpc_channel *ch, void *payload)
        msg = container_of(payload, struct xpc_msg_sn2, payload);
        msg_number = msg->number;

-       dev_dbg(xpc_chan, "msg=0x%p, msg_number=%ld, partid=%d, channel=%d\n",
+       dev_dbg(xpc_chan, "msg=0x%p, msg_number=%lld, partid=%d, channel=%d\n",
                (void *)msg, msg_number, ch->partid, ch->number);

        DBUG_ON((((u64)msg - (u64)ch->sn.sn2.remote_msgqueue) / ch->entry_size) !=
drivers/serial/ioc4_serial.c
@@ -930,7 +930,7 @@ static void handle_dma_error_intr(void *arg, uint32_t other_ir)

        if (readl(&port->ip_mem->pci_err_addr_l.raw) & IOC4_PCI_ERR_ADDR_VLD) {
                printk(KERN_ERR
-                       "PCI error address is 0x%lx, "
+                       "PCI error address is 0x%llx, "
                        "master is serial port %c %s\n",
                        (((uint64_t)readl(&port->ip_mem->pci_err_addr_h)
                                                        << 32)