
/* arch/arm/include/asm/system.h */

#ifndef __ASM_ARM_SYSTEM_H
#define __ASM_ARM_SYSTEM_H

#ifdef __KERNEL__

#include <asm/memory.h>

#define CPU_ARCH_UNKNOWN      0
#define CPU_ARCH_ARMv3        1
#define CPU_ARCH_ARMv4        2
#define CPU_ARCH_ARMv4T       3
#define CPU_ARCH_ARMv5        4
#define CPU_ARCH_ARMv5T       5
#define CPU_ARCH_ARMv5TE      6
#define CPU_ARCH_ARMv5TEJ     7
#define CPU_ARCH_ARMv6        8
#define CPU_ARCH_ARMv7        9

/*
 * CR1 bits (CP#15 CR1)
 */
#define CR_M      (1 << 0)    /* MMU enable                       */
#define CR_A      (1 << 1)    /* Alignment abort enable           */
#define CR_C      (1 << 2)    /* Dcache enable              */
#define CR_W      (1 << 3)    /* Write buffer enable              */
#define CR_P      (1 << 4)    /* 32-bit exception handler         */
#define CR_D      (1 << 5)    /* 32-bit data address range        */
#define CR_L      (1 << 6)    /* Implementation defined           */
#define CR_B      (1 << 7)    /* Big endian                       */
#define CR_S      (1 << 8)    /* System MMU protection            */
#define CR_R      (1 << 9)    /* ROM MMU protection               */
#define CR_F      (1 << 10)   /* Implementation defined           */
#define CR_Z      (1 << 11)   /* Implementation defined           */
#define CR_I      (1 << 12)   /* Icache enable              */
#define CR_V      (1 << 13)   /* Vectors relocated to 0xffff0000  */
#define CR_RR     (1 << 14)   /* Round Robin cache replacement    */
#define CR_L4     (1 << 15)   /* LDR pc can set T bit             */
#define CR_DT     (1 << 16)
#define CR_IT     (1 << 18)
#define CR_ST     (1 << 19)
#define CR_FI     (1 << 21)   /* Fast interrupt (lower latency mode)    */
#define CR_U      (1 << 22)   /* Unaligned access operation       */
#define CR_XP     (1 << 23)   /* Extended page tables             */
#define CR_VE     (1 << 24)   /* Vectored interrupts              */

#define CPUID_ID  0
#define CPUID_CACHETYPE 1
#define CPUID_TCM 2
#define CPUID_TLBTYPE   3

/*
 * This is used to ensure the compiler did actually allocate the register we
 * asked it for in some inline assembly sequences.  Apparently we can't trust
 * the compiler from one version to another, so a bit of paranoia won't hurt.
 * This string is meant to be concatenated with the inline asm string and
 * will cause compilation to stop on mismatch.
 * (for details, see gcc PR 15089)
 */
#define __asmeq(x, y)  ".ifnc " x "," y " ; .err ; .endif\n\t"
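
/*
 * A minimal usage sketch (hypothetical caller, not defined here):
 * concatenating __asmeq with the asm template makes assembly fail
 * if "%0" was not actually allocated to r0 as requested:
 *
 *   register unsigned long _r0 asm("r0") = arg;
 *   asm volatile(
 *         __asmeq("%0", "r0")
 *         "swi 0"
 *         : "+r" (_r0));
 */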

#ifndef __ASSEMBLY__

#include <linux/linkage.h>
#include <linux/stringify.h>
#include <linux/irqflags.h>

#ifdef CONFIG_CPU_CP15
#define read_cpuid(reg)                                     \
      ({                                              \
            unsigned int __val;                             \
            asm("mrc    p15, 0, %0, c0, c0, " __stringify(reg)    \
                : "=r" (__val)                              \
                :                                     \
                : "cc");                                    \
            __val;                                          \
      })
#else
extern unsigned int processor_id;
#define read_cpuid(reg) (processor_id)
#endif

/*
 * The CPU ID never changes at run time, so we might as well tell the
 * compiler that it's constant.  Use this function to read the CPU ID
 * rather than reading processor_id or calling read_cpuid() directly.
 */
static inline unsigned int read_cpuid_id(void) __attribute_const__;

static inline unsigned int read_cpuid_id(void)
{
      return read_cpuid(CPUID_ID);
}
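
/*
 * Decoding sketch: in the ARM main ID register, bits [31:24] hold the
 * implementer code and bits [15:4] the primary part number, e.g.
 *
 *   unsigned int id = read_cpuid_id();
 *   unsigned int implementer = (id >> 24) & 0xff;    0x69 is Intel
 *   unsigned int part = (id >> 4) & 0xfff;
 */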

#define __exception     __attribute__((section(".exception.text")))

struct thread_info;
struct task_struct;

/* information about the system we're running on */
extern unsigned int system_rev;
extern unsigned int system_serial_low;
extern unsigned int system_serial_high;
extern unsigned int mem_fclk_21285;

struct pt_regs;

void die(const char *msg, struct pt_regs *regs, int err)
            __attribute__((noreturn));

struct siginfo;
void arm_notify_die(const char *str, struct pt_regs *regs, struct siginfo *info,
            unsigned long err, unsigned long trap);

void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
                               struct pt_regs *),
                 int sig, const char *name);

#define xchg(ptr,x) \
      ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
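
/*
 * Usage sketch (hypothetical variables): xchg() atomically stores the
 * new value and returns the old one, e.g. a hand-rolled test-and-set
 * (a real lock would also need memory barriers):
 *
 *   static volatile unsigned long lock;
 *   while (xchg(&lock, 1) != 0)
 *         ;                        spin until we observed 0
 *   ...critical section...
 *   lock = 0;
 */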

extern asmlinkage void __backtrace(void);
extern asmlinkage void c_backtrace(unsigned long fp, int pmode);

struct mm_struct;
extern void show_pte(struct mm_struct *mm, unsigned long addr);
extern void __show_regs(struct pt_regs *);

extern int cpu_architecture(void);
extern void cpu_init(void);

void arm_machine_restart(char mode);
extern void (*arm_pm_restart)(char str);

/*
 * Intel's XScale3 core supports some v6 features (supersections, L2)
 * but advertises itself as v5 as it does not support the v6 ISA.  For
 * this reason, we need a way to explicitly test for this type of CPU.
 */
#ifndef CONFIG_CPU_XSC3
#define cpu_is_xsc3()   0
#else
static inline int cpu_is_xsc3(void)
{
      extern unsigned int processor_id;

      if ((processor_id & 0xffffe000) == 0x69056000)
            return 1;

      return 0;
}
#endif

#if !defined(CONFIG_CPU_XSCALE) && !defined(CONFIG_CPU_XSC3)
#define     cpu_is_xscale()   0
#else
#define     cpu_is_xscale()   1
#endif

#define UDBG_UNDEFINED  (1 << 0)
#define UDBG_SYSCALL    (1 << 1)
#define UDBG_BADABORT   (1 << 2)
#define UDBG_SEGV (1 << 3)
#define UDBG_BUS  (1 << 4)

extern unsigned int user_debug;

#if __LINUX_ARM_ARCH__ >= 4
#define vectors_high()  (cr_alignment & CR_V)
#else
#define vectors_high()  (0)
#endif

#if __LINUX_ARM_ARCH__ >= 7
#define isb() __asm__ __volatile__ ("isb" : : : "memory")
#define dsb() __asm__ __volatile__ ("dsb" : : : "memory")
#define dmb() __asm__ __volatile__ ("dmb" : : : "memory")
#elif defined(CONFIG_CPU_XSC3) || __LINUX_ARM_ARCH__ == 6
#define isb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
                            : : "r" (0) : "memory")
#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
                            : : "r" (0) : "memory")
#define dmb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \
                            : : "r" (0) : "memory")
#else
#define isb() __asm__ __volatile__ ("" : : : "memory")
#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
                            : : "r" (0) : "memory")
#define dmb() __asm__ __volatile__ ("" : : : "memory")
#endif

#ifndef CONFIG_SMP
#define mb()      do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
#define rmb()     do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
#define wmb()     do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
#define smp_mb()  barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
#else
#define mb()            dmb()
#define rmb()           dmb()
#define wmb()           dmb()
#define smp_mb()  dmb()
#define smp_rmb() dmb()
#define smp_wmb() dmb()
#endif
#define read_barrier_depends()            do { } while(0)
#define smp_read_barrier_depends()  do { } while(0)
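
/*
 * Pairing sketch (hypothetical variables): smp_wmb() on the producer
 * side pairs with smp_rmb() on the consumer side:
 *
 *   CPU 0:  data = 42;  smp_wmb();  flag = 1;
 *   CPU 1:  if (flag) { smp_rmb();  ...data is guaranteed to be 42; }
 */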

#define set_mb(var, value)    do { var = value; smp_mb(); } while (0)
#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");

extern unsigned long cr_no_alignment;     /* defined in entry-armv.S */
extern unsigned long cr_alignment;  /* defined in entry-armv.S */

static inline unsigned int get_cr(void)
{
      unsigned int val;
      asm("mrc p15, 0, %0, c1, c0, 0      @ get CR" : "=r" (val) : : "cc");
      return val;
}

static inline void set_cr(unsigned int val)
{
      asm volatile("mcr p15, 0, %0, c1, c0, 0   @ set CR"
        : : "r" (val) : "cc");
      isb();
}
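
/*
 * Read-modify-write sketch: e.g. enabling alignment fault checking by
 * setting CR_A in the control register:
 *
 *   set_cr(get_cr() | CR_A);
 */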

#ifndef CONFIG_SMP
extern void adjust_cr(unsigned long mask, unsigned long set);
#endif

#define CPACC_FULL(n)         (3 << (n * 2))
#define CPACC_SVC(n)          (1 << (n * 2))
#define CPACC_DISABLE(n)      (0 << (n * 2))

static inline unsigned int get_copro_access(void)
{
      unsigned int val;
      asm("mrc p15, 0, %0, c1, c0, 2 @ get copro access"
        : "=r" (val) : : "cc");
      return val;
}

static inline void set_copro_access(unsigned int val)
{
      asm volatile("mcr p15, 0, %0, c1, c0, 2 @ set copro access"
        : : "r" (val) : "cc");
      isb();
}
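
/*
 * Sketch: granting full (user + kernel) access to coprocessors 10 and
 * 11, as is done before enabling VFP:
 *
 *   set_copro_access(get_copro_access() | CPACC_FULL(10) | CPACC_FULL(11));
 */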

/*
 * switch_mm() may do a full cache flush over the context switch,
 * so enable interrupts over the context switch to avoid high
 * latency.
 */
#define __ARCH_WANT_INTERRUPTS_ON_CTXSW

/*
 * switch_to(prev, next) should switch from task `prev' to `next'
 * `prev' will never be the same as `next'.  schedule() itself
 * contains the memory barrier to tell GCC not to cache `current'.
 */
extern struct task_struct *__switch_to(struct task_struct *, struct thread_info *, struct thread_info *);

#define switch_to(prev,next,last)                           \
do {                                                  \
      last = __switch_to(prev,task_thread_info(prev), task_thread_info(next));      \
} while (0)

#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
/*
 * On the StrongARM, "swp" is terminally broken since it bypasses the
 * cache totally.  This means that the cache becomes inconsistent, and,
 * since we use normal loads/stores as well, this is really bad.
 * Typically, this causes oopsen in filp_close, but could have other,
 * more disastrous effects.  There are two work-arounds:
 *  1. Disable interrupts and emulate the atomic swap
 *  2. Clean the cache, perform atomic swap, flush the cache
 *
 * We choose (1) since it's the "easiest" to achieve here and is not
 * dependent on the processor type.
 *
 * NOTE that this solution won't work on an SMP system, so explicitly
 * forbid it here.
 */
#define swp_is_buggy
#endif

static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
      extern void __bad_xchg(volatile void *, int);
      unsigned long ret;
#ifdef swp_is_buggy
      unsigned long flags;
#endif
#if __LINUX_ARM_ARCH__ >= 6
      unsigned int tmp;
#endif

      switch (size) {
#if __LINUX_ARM_ARCH__ >= 6
      case 1:
            asm volatile("@   __xchg1\n"
            "1:   ldrexb      %0, [%3]\n"
            "     strexb      %1, %2, [%3]\n"
            "     teq   %1, #0\n"
            "     bne   1b"
                  : "=&r" (ret), "=&r" (tmp)
                  : "r" (x), "r" (ptr)
                  : "memory", "cc");
            break;
      case 4:
            asm volatile("@   __xchg4\n"
            "1:   ldrex %0, [%3]\n"
            "     strex %1, %2, [%3]\n"
            "     teq   %1, #0\n"
            "     bne   1b"
                  : "=&r" (ret), "=&r" (tmp)
                  : "r" (x), "r" (ptr)
                  : "memory", "cc");
            break;
#elif defined(swp_is_buggy)
#ifdef CONFIG_SMP
#error SMP is not supported on this platform
#endif
      case 1:
            raw_local_irq_save(flags);
            ret = *(volatile unsigned char *)ptr;
            *(volatile unsigned char *)ptr = x;
            raw_local_irq_restore(flags);
            break;

      case 4:
            raw_local_irq_save(flags);
            ret = *(volatile unsigned long *)ptr;
            *(volatile unsigned long *)ptr = x;
            raw_local_irq_restore(flags);
            break;
#else
      case 1:
            asm volatile("@   __xchg1\n"
            "     swpb  %0, %1, [%2]"
                  : "=&r" (ret)
                  : "r" (x), "r" (ptr)
                  : "memory", "cc");
            break;
      case 4:
            asm volatile("@   __xchg4\n"
            "     swp   %0, %1, [%2]"
                  : "=&r" (ret)
                  : "r" (x), "r" (ptr)
                  : "memory", "cc");
            break;
#endif
      default:
            __bad_xchg(ptr, size), ret = 0;
            break;
      }

      return ret;
}

extern void disable_hlt(void);
extern void enable_hlt(void);

#include <asm-generic/cmpxchg-local.h>

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
 * them available.
 */
#define cmpxchg_local(ptr, o, n)                                   \
      ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
                  (unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
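
/*
 * Usage sketch (hypothetical counter): a compare-and-swap loop that is
 * atomic only against the current CPU, which is enough for per-cpu data:
 *
 *   unsigned long old, prev;
 *   old = counter;
 *   while ((prev = cmpxchg_local(&counter, old, old + 1)) != old)
 *         old = prev;
 */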

#ifndef CONFIG_SMP
#include <asm-generic/cmpxchg.h>
#endif

#endif /* __ASSEMBLY__ */

#define arch_align_stack(x) (x)

#endif /* __KERNEL__ */

#endif
