Disintegrate asm/system.h for S390

Disintegrate asm/system.h for S390.  The barrier, control-register,
facility, switch_to and exec definitions move into new headers
(asm/barrier.h, asm/ctl_reg.h, asm/facility.h, asm/switch_to.h and
asm/exec.h); the remaining declarations move to the existing headers
they logically belong to (stap() to asm/processor.h, tprot() to
asm/mmu.h, the real-memory copy helpers to asm/uaccess.h, and the
pfault and machine-restart declarations to asm/setup.h).  asm/system.h
itself is reduced to a stub that includes the new headers and is
marked for deletion.

Signed-off-by: David Howells <dhowells@redhat.com>
cc: linux-s390@vger.kernel.org
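
For callers, the practical effect is that code which previously picked
up, say, save_access_regs() via asm/system.h now includes
asm/switch_to.h directly.  A minimal sketch of a post-split caller
(the function and array names here are hypothetical, not from the
patch):

	#include <asm/switch_to.h>

	static unsigned int acrs[16];		/* access registers 0-15 */

	static void snapshot_access_regs(void)
	{
		save_access_regs(acrs);		/* stam 0,15 into acrs[] */
	}
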
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index 8517d2a..748347b 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -15,7 +15,7 @@
 
 #include <linux/compiler.h>
 #include <linux/types.h>
-#include <asm/system.h>
+#include <asm/cmpxchg.h>
 
 #define ATOMIC_INIT(i)  { (i) }
 
diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
new file mode 100644
index 0000000..451273a
--- /dev/null
+++ b/arch/s390/include/asm/barrier.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright IBM Corp. 1999, 2009
+ *
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#ifndef __ASM_BARRIER_H
+#define __ASM_BARRIER_H
+
+/*
+ * Force strict CPU ordering.
+ * And yes, this is required on UP too when we're talking
+ * to devices.
+ *
+ * This is very similar to the ppc eieio/sync instruction in that it
+ * does a checkpoint synchronisation & makes sure that
+ * all memory ops have completed wrt other CPUs (see 7-15 POP DJB).
+ */
+
+#define eieio()	asm volatile("bcr 15,0" : : : "memory")
+#define SYNC_OTHER_CORES(x)   eieio()
+#define mb()    eieio()
+#define rmb()   eieio()
+#define wmb()   eieio()
+#define read_barrier_depends() do { } while(0)
+#define smp_mb()       mb()
+#define smp_rmb()      rmb()
+#define smp_wmb()      wmb()
+#define smp_read_barrier_depends()    read_barrier_depends()
+#define smp_mb__before_clear_bit()     smp_mb()
+#define smp_mb__after_clear_bit()      smp_mb()
+
+#define set_mb(var, value)      do { var = value; mb(); } while (0)
+
+#endif /* __ASM_BARRIER_H */
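
The split leaves the barrier semantics unchanged: every flavour above
expands to "bcr 15,0", a serializing branch-on-condition to nowhere.
As a minimal sketch of the pattern these barriers exist for (all names
hypothetical), the classic message-passing idiom looks like:

	#include <linux/errno.h>
	#include <asm/barrier.h>

	static int data, ready;

	static void producer(void)
	{
		data = 42;
		smp_wmb();	/* order the data store before the flag store */
		ready = 1;
	}

	static int consumer(void)
	{
		if (!ready)
			return -EAGAIN;
		smp_rmb();	/* order the flag load before the data load */
		return data;
	}
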
diff --git a/arch/s390/include/asm/ctl_reg.h b/arch/s390/include/asm/ctl_reg.h
new file mode 100644
index 0000000..ecde941
--- /dev/null
+++ b/arch/s390/include/asm/ctl_reg.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright IBM Corp. 1999, 2009
+ *
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#ifndef __ASM_CTL_REG_H
+#define __ASM_CTL_REG_H
+
+#ifdef __s390x__
+
+#define __ctl_load(array, low, high) ({				\
+	typedef struct { char _[sizeof(array)]; } addrtype;	\
+	asm volatile(						\
+		"	lctlg	%1,%2,%0\n"			\
+		: : "Q" (*(addrtype *)(&array)),		\
+		    "i" (low), "i" (high));			\
+	})
+
+#define __ctl_store(array, low, high) ({			\
+	typedef struct { char _[sizeof(array)]; } addrtype;	\
+	asm volatile(						\
+		"	stctg	%1,%2,%0\n"			\
+		: "=Q" (*(addrtype *)(&array))			\
+		: "i" (low), "i" (high));			\
+	})
+
+#else /* __s390x__ */
+
+#define __ctl_load(array, low, high) ({				\
+	typedef struct { char _[sizeof(array)]; } addrtype;	\
+	asm volatile(						\
+		"	lctl	%1,%2,%0\n"			\
+		: : "Q" (*(addrtype *)(&array)),		\
+		    "i" (low), "i" (high));			\
+})
+
+#define __ctl_store(array, low, high) ({			\
+	typedef struct { char _[sizeof(array)]; } addrtype;	\
+	asm volatile(						\
+		"	stctl	%1,%2,%0\n"			\
+		: "=Q" (*(addrtype *)(&array))			\
+		: "i" (low), "i" (high));			\
+	})
+
+#endif /* __s390x__ */
+
+#define __ctl_set_bit(cr, bit) ({	\
+	unsigned long __dummy;		\
+	__ctl_store(__dummy, cr, cr);	\
+	__dummy |= 1UL << (bit);	\
+	__ctl_load(__dummy, cr, cr);	\
+})
+
+#define __ctl_clear_bit(cr, bit) ({	\
+	unsigned long __dummy;		\
+	__ctl_store(__dummy, cr, cr);	\
+	__dummy &= ~(1UL << (bit));	\
+	__ctl_load(__dummy, cr, cr);	\
+})
+
+#ifdef CONFIG_SMP
+
+extern void smp_ctl_set_bit(int cr, int bit);
+extern void smp_ctl_clear_bit(int cr, int bit);
+#define ctl_set_bit(cr, bit) smp_ctl_set_bit(cr, bit)
+#define ctl_clear_bit(cr, bit) smp_ctl_clear_bit(cr, bit)
+
+#else
+
+#define ctl_set_bit(cr, bit) __ctl_set_bit(cr, bit)
+#define ctl_clear_bit(cr, bit) __ctl_clear_bit(cr, bit)
+
+#endif /* CONFIG_SMP */
+
+#endif /* __ASM_CTL_REG_H */
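
A brief usage sketch (call sites hypothetical): __ctl_store() and
__ctl_load() give raw access to a range of control registers on the
local CPU, while ctl_set_bit()/ctl_clear_bit() additionally propagate
the change to all other CPUs on SMP via smp_ctl_set_bit():

	#include <asm/ctl_reg.h>

	static unsigned long peek_cr0(void)
	{
		unsigned long cr0;

		__ctl_store(cr0, 0, 0);	/* stctg/stctl of control register 0 */
		return cr0;
	}

	static void enable_example_bit(void)
	{
		ctl_set_bit(0, 17);	/* CR and bit number illustrative only */
	}
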
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index 547f1a6..c4ee39f 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -129,7 +129,6 @@
 typedef s390_compat_regs compat_elf_gregset_t;
 
 #include <linux/sched.h>	/* for task_struct */
-#include <asm/system.h>		/* for save_access_regs */
 #include <asm/mmu_context.h>
 
 #include <asm/vdso.h>
diff --git a/arch/s390/include/asm/exec.h b/arch/s390/include/asm/exec.h
new file mode 100644
index 0000000..c4a93d6
--- /dev/null
+++ b/arch/s390/include/asm/exec.h
@@ -0,0 +1,12 @@
+/*
+ * Copyright IBM Corp. 1999, 2009
+ *
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#ifndef __ASM_EXEC_H
+#define __ASM_EXEC_H
+
+extern unsigned long arch_align_stack(unsigned long sp);
+
+#endif /* __ASM_EXEC_H */
diff --git a/arch/s390/include/asm/facility.h b/arch/s390/include/asm/facility.h
new file mode 100644
index 0000000..1e5b27e
--- /dev/null
+++ b/arch/s390/include/asm/facility.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright IBM Corp. 1999, 2009
+ *
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#ifndef __ASM_FACILITY_H
+#define __ASM_FACILITY_H
+
+#include <linux/string.h>
+#include <linux/preempt.h>
+#include <asm/lowcore.h>
+
+#define MAX_FACILITY_BIT (256*8)	/* stfle_fac_list has 256 bytes */
+
+/*
+ * The test_facility function uses the bit ordering where the MSB is bit 0.
+ * That makes it easier to query facility bits with the bit number as
+ * documented in the Principles of Operation.
+ */
+static inline int test_facility(unsigned long nr)
+{
+	unsigned char *ptr;
+
+	if (nr >= MAX_FACILITY_BIT)
+		return 0;
+	ptr = (unsigned char *) &S390_lowcore.stfle_fac_list + (nr >> 3);
+	return (*ptr & (0x80 >> (nr & 7))) != 0;
+}
+
+/**
+ * stfle - Store facility list extended
+ * @stfle_fac_list: array where facility list can be stored
+ * @size: size of the passed-in array, in double words
+ */
+static inline void stfle(u64 *stfle_fac_list, int size)
+{
+	unsigned long nr;
+
+	preempt_disable();
+	S390_lowcore.stfl_fac_list = 0;
+	asm volatile(
+		"	.insn s,0xb2b10000,0(0)\n" /* stfl */
+		"0:\n"
+		EX_TABLE(0b, 0b)
+		: "=m" (S390_lowcore.stfl_fac_list));
+	nr = 4; /* bytes stored by stfl */
+	memcpy(stfle_fac_list, &S390_lowcore.stfl_fac_list, 4);
+	if (S390_lowcore.stfl_fac_list & 0x01000000) {
+		/* More facility bits available with stfle */
+		register unsigned long reg0 asm("0") = size - 1;
+
+		asm volatile(".insn s,0xb2b00000,0(%1)" /* stfle */
+			     : "+d" (reg0)
+			     : "a" (stfle_fac_list)
+			     : "memory", "cc");
+		nr = (reg0 + 1) * 8; /* # bytes stored by stfle */
+	}
+	memset((char *) stfle_fac_list + nr, 0, size * 8 - nr);
+	preempt_enable();
+}
+
+#endif /* __ASM_FACILITY_H */
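
A sketch of the intended use (the facility number below is an
illustrative assumption, not taken from the patch): the MSB-first bit
ordering means callers can pass facility numbers straight from the
Principles of Operation, and stfle() fills a caller-supplied list:

	#include <linux/kernel.h>
	#include <asm/facility.h>

	static u64 fac_list[4];

	static int have_example_facility(void)
	{
		stfle(fac_list, ARRAY_SIZE(fac_list));	/* size in double words */
		return test_facility(45);		/* example bit number */
	}
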
diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h
index 4506791..1c7d6ce 100644
--- a/arch/s390/include/asm/mmu.h
+++ b/arch/s390/include/asm/mmu.h
@@ -21,4 +21,18 @@
 	.context.pgtable_list = LIST_HEAD_INIT(name.context.pgtable_list),    \
 	.context.gmap_list = LIST_HEAD_INIT(name.context.gmap_list),
 
+static inline int tprot(unsigned long addr)
+{
+	int rc = -EFAULT;
+
+	asm volatile(
+		"	tprot	0(%1),0\n"
+		"0:	ipm	%0\n"
+		"	srl	%0,28\n"
+		"1:\n"
+		EX_TABLE(0b,1b)
+		: "+d" (rc) : "a" (addr) : "cc");
+	return rc;
+}
+
 #endif
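
tprot() hands back the condition code of the TPROT instruction, with
cc 0 meaning both fetch and store are permitted, or -EFAULT when the
probe itself raises an exception (the EX_TABLE entry converts the
fault into that error return).  A hypothetical wrapper:

	#include <asm/mmu.h>

	static int page_is_writable(unsigned long addr)
	{
		return tprot(addr) == 0;	/* cc 0: fetch and store permitted */
	}
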
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index 5682f16..5d09e40 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -12,6 +12,7 @@
 #include <asm/pgalloc.h>
 #include <asm/uaccess.h>
 #include <asm/tlbflush.h>
+#include <asm/ctl_reg.h>
 #include <asm-generic/mm_hooks.h>
 
 static inline int init_new_context(struct task_struct *tsk,
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index d25843a..d499b30 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -14,6 +14,7 @@
 #define __ASM_S390_PROCESSOR_H
 
 #include <linux/linkage.h>
+#include <linux/irqflags.h>
 #include <asm/cpu.h>
 #include <asm/page.h>
 #include <asm/ptrace.h>
@@ -156,6 +157,14 @@
 #define KSTK_EIP(tsk)	(task_pt_regs(tsk)->psw.addr)
 #define KSTK_ESP(tsk)	(task_pt_regs(tsk)->gprs[15])
 
+static inline unsigned short stap(void)
+{
+	unsigned short cpu_address;
+
+	asm volatile("stap %0" : "=m" (cpu_address));
+	return cpu_address;
+}
+
 /*
  * Give up the time slice of the virtual PU.
  */
@@ -304,6 +313,21 @@
 }
 
 /*
+ * Used to set the PSW mask, except for the first byte, which
+ * is left unchanged by this function.
+ */
+static inline void
+__set_psw_mask(unsigned long mask)
+{
+	__load_psw_mask(mask | (arch_local_save_flags() & ~(-1UL >> 8)));
+}
+
+#define local_mcck_enable() \
+	__set_psw_mask(psw_kernel_bits | PSW_MASK_DAT | PSW_MASK_MCHECK)
+#define local_mcck_disable() \
+	__set_psw_mask(psw_kernel_bits | PSW_MASK_DAT)
+
+/*
  * Basic Machine Check/Program Check Handler.
  */
 
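
stap() simply stores the 16-bit CPU address of the executing CPU,
while the new local_mcck_*() helpers bracket code that must run with
machine-check interruptions masked; __set_psw_mask() preserves the
first PSW byte across the update.  A hypothetical critical section:

	#include <asm/processor.h>

	static void fragile_operation(void)
	{
		local_mcck_disable();
		/* ... state a machine-check handler must not observe ... */
		local_mcck_enable();
	}
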
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index 097183c..b21e46e 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -140,6 +140,20 @@
 #define NSS_NAME_SIZE	8
 extern char kernel_nss_name[];
 
+#ifdef CONFIG_PFAULT
+extern int pfault_init(void);
+extern void pfault_fini(void);
+#else /* CONFIG_PFAULT */
+#define pfault_init()		({-1;})
+#define pfault_fini()		do { } while (0)
+#endif /* CONFIG_PFAULT */
+
+extern void cmma_init(void);
+
+extern void (*_machine_restart)(char *command);
+extern void (*_machine_halt)(void);
+extern void (*_machine_power_off)(void);
+
 #else /* __ASSEMBLY__ */
 
 #ifndef __s390x__
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h
index 797f787..c77c6de 100644
--- a/arch/s390/include/asm/smp.h
+++ b/arch/s390/include/asm/smp.h
@@ -9,7 +9,7 @@
 
 #ifdef CONFIG_SMP
 
-#include <asm/system.h>
+#include <asm/lowcore.h>
 
 #define raw_smp_processor_id()	(S390_lowcore.cpu_nr)
 
diff --git a/arch/s390/include/asm/switch_to.h b/arch/s390/include/asm/switch_to.h
new file mode 100644
index 0000000..f223068
--- /dev/null
+++ b/arch/s390/include/asm/switch_to.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright IBM Corp. 1999, 2009
+ *
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#ifndef __ASM_SWITCH_TO_H
+#define __ASM_SWITCH_TO_H
+
+#include <linux/thread_info.h>
+
+extern struct task_struct *__switch_to(void *, void *);
+extern void update_per_regs(struct task_struct *task);
+
+static inline void save_fp_regs(s390_fp_regs *fpregs)
+{
+	asm volatile(
+		"	std	0,%O0+8(%R0)\n"
+		"	std	2,%O0+24(%R0)\n"
+		"	std	4,%O0+40(%R0)\n"
+		"	std	6,%O0+56(%R0)"
+		: "=Q" (*fpregs) : "Q" (*fpregs));
+	if (!MACHINE_HAS_IEEE)
+		return;
+	asm volatile(
+		"	stfpc	%0\n"
+		"	std	1,%O0+16(%R0)\n"
+		"	std	3,%O0+32(%R0)\n"
+		"	std	5,%O0+48(%R0)\n"
+		"	std	7,%O0+64(%R0)\n"
+		"	std	8,%O0+72(%R0)\n"
+		"	std	9,%O0+80(%R0)\n"
+		"	std	10,%O0+88(%R0)\n"
+		"	std	11,%O0+96(%R0)\n"
+		"	std	12,%O0+104(%R0)\n"
+		"	std	13,%O0+112(%R0)\n"
+		"	std	14,%O0+120(%R0)\n"
+		"	std	15,%O0+128(%R0)\n"
+		: "=Q" (*fpregs) : "Q" (*fpregs));
+}
+
+static inline void restore_fp_regs(s390_fp_regs *fpregs)
+{
+	asm volatile(
+		"	ld	0,%O0+8(%R0)\n"
+		"	ld	2,%O0+24(%R0)\n"
+		"	ld	4,%O0+40(%R0)\n"
+		"	ld	6,%O0+56(%R0)"
+		: : "Q" (*fpregs));
+	if (!MACHINE_HAS_IEEE)
+		return;
+	asm volatile(
+		"	lfpc	%0\n"
+		"	ld	1,%O0+16(%R0)\n"
+		"	ld	3,%O0+32(%R0)\n"
+		"	ld	5,%O0+48(%R0)\n"
+		"	ld	7,%O0+64(%R0)\n"
+		"	ld	8,%O0+72(%R0)\n"
+		"	ld	9,%O0+80(%R0)\n"
+		"	ld	10,%O0+88(%R0)\n"
+		"	ld	11,%O0+96(%R0)\n"
+		"	ld	12,%O0+104(%R0)\n"
+		"	ld	13,%O0+112(%R0)\n"
+		"	ld	14,%O0+120(%R0)\n"
+		"	ld	15,%O0+128(%R0)\n"
+		: : "Q" (*fpregs));
+}
+
+static inline void save_access_regs(unsigned int *acrs)
+{
+	asm volatile("stam 0,15,%0" : "=Q" (*acrs));
+}
+
+static inline void restore_access_regs(unsigned int *acrs)
+{
+	asm volatile("lam 0,15,%0" : : "Q" (*acrs));
+}
+
+#define switch_to(prev,next,last) do {					\
+	if (prev->mm) {							\
+		save_fp_regs(&prev->thread.fp_regs);			\
+		save_access_regs(&prev->thread.acrs[0]);		\
+	}								\
+	if (next->mm) {							\
+		restore_fp_regs(&next->thread.fp_regs);			\
+		restore_access_regs(&next->thread.acrs[0]);		\
+		update_per_regs(next);					\
+	}								\
+	prev = __switch_to(prev,next);					\
+} while (0)
+
+extern void account_vtime(struct task_struct *, struct task_struct *);
+extern void account_tick_vtime(struct task_struct *);
+
+#define finish_arch_switch(prev) do {					     \
+	set_fs(current->thread.mm_segment);				     \
+	account_vtime(prev, current);					     \
+} while (0)
+
+#endif /* __ASM_SWITCH_TO_H */
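
Note that switch_to() only saves and restores the FPU and access
registers for tasks that own an mm; kernel threads skip both.  The
helpers are also usable directly; a hypothetical snapshot (names not
from the patch):

	#include <asm/switch_to.h>

	static s390_fp_regs fp_snapshot;

	static void checkpoint_fpu(void)
	{
		/* stores fpc and f0-f15 with IEEE, else only f0/f2/f4/f6 */
		save_fp_regs(&fp_snapshot);
	}
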
diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h
index 2e0bb7f..641c729 100644
--- a/arch/s390/include/asm/system.h
+++ b/arch/s390/include/asm/system.h
@@ -1,315 +1,7 @@
-/*
- * Copyright IBM Corp. 1999, 2009
- *
- * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
- */
-
-#ifndef __ASM_SYSTEM_H
-#define __ASM_SYSTEM_H
-
-#include <linux/preempt.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <asm/types.h>
-#include <asm/ptrace.h>
-#include <asm/setup.h>
-#include <asm/processor.h>
-#include <asm/lowcore.h>
+/* FILE TO BE DELETED. DO NOT ADD STUFF HERE! */
+#include <asm/barrier.h>
 #include <asm/cmpxchg.h>
-
-#ifdef __KERNEL__
-
-struct task_struct;
-
-extern struct task_struct *__switch_to(void *, void *);
-extern void update_per_regs(struct task_struct *task);
-
-static inline void save_fp_regs(s390_fp_regs *fpregs)
-{
-	asm volatile(
-		"	std	0,%O0+8(%R0)\n"
-		"	std	2,%O0+24(%R0)\n"
-		"	std	4,%O0+40(%R0)\n"
-		"	std	6,%O0+56(%R0)"
-		: "=Q" (*fpregs) : "Q" (*fpregs));
-	if (!MACHINE_HAS_IEEE)
-		return;
-	asm volatile(
-		"	stfpc	%0\n"
-		"	std	1,%O0+16(%R0)\n"
-		"	std	3,%O0+32(%R0)\n"
-		"	std	5,%O0+48(%R0)\n"
-		"	std	7,%O0+64(%R0)\n"
-		"	std	8,%O0+72(%R0)\n"
-		"	std	9,%O0+80(%R0)\n"
-		"	std	10,%O0+88(%R0)\n"
-		"	std	11,%O0+96(%R0)\n"
-		"	std	12,%O0+104(%R0)\n"
-		"	std	13,%O0+112(%R0)\n"
-		"	std	14,%O0+120(%R0)\n"
-		"	std	15,%O0+128(%R0)\n"
-		: "=Q" (*fpregs) : "Q" (*fpregs));
-}
-
-static inline void restore_fp_regs(s390_fp_regs *fpregs)
-{
-	asm volatile(
-		"	ld	0,%O0+8(%R0)\n"
-		"	ld	2,%O0+24(%R0)\n"
-		"	ld	4,%O0+40(%R0)\n"
-		"	ld	6,%O0+56(%R0)"
-		: : "Q" (*fpregs));
-	if (!MACHINE_HAS_IEEE)
-		return;
-	asm volatile(
-		"	lfpc	%0\n"
-		"	ld	1,%O0+16(%R0)\n"
-		"	ld	3,%O0+32(%R0)\n"
-		"	ld	5,%O0+48(%R0)\n"
-		"	ld	7,%O0+64(%R0)\n"
-		"	ld	8,%O0+72(%R0)\n"
-		"	ld	9,%O0+80(%R0)\n"
-		"	ld	10,%O0+88(%R0)\n"
-		"	ld	11,%O0+96(%R0)\n"
-		"	ld	12,%O0+104(%R0)\n"
-		"	ld	13,%O0+112(%R0)\n"
-		"	ld	14,%O0+120(%R0)\n"
-		"	ld	15,%O0+128(%R0)\n"
-		: : "Q" (*fpregs));
-}
-
-static inline void save_access_regs(unsigned int *acrs)
-{
-	asm volatile("stam 0,15,%0" : "=Q" (*acrs));
-}
-
-static inline void restore_access_regs(unsigned int *acrs)
-{
-	asm volatile("lam 0,15,%0" : : "Q" (*acrs));
-}
-
-#define switch_to(prev,next,last) do {					\
-	if (prev->mm) {							\
-		save_fp_regs(&prev->thread.fp_regs);			\
-		save_access_regs(&prev->thread.acrs[0]);		\
-	}								\
-	if (next->mm) {							\
-		restore_fp_regs(&next->thread.fp_regs);			\
-		restore_access_regs(&next->thread.acrs[0]);		\
-		update_per_regs(next);					\
-	}								\
-	prev = __switch_to(prev,next);					\
-} while (0)
-
-extern void account_vtime(struct task_struct *, struct task_struct *);
-extern void account_tick_vtime(struct task_struct *);
-
-#ifdef CONFIG_PFAULT
-extern int pfault_init(void);
-extern void pfault_fini(void);
-#else /* CONFIG_PFAULT */
-#define pfault_init()		({-1;})
-#define pfault_fini()		do { } while (0)
-#endif /* CONFIG_PFAULT */
-
-extern void cmma_init(void);
-extern int memcpy_real(void *, void *, size_t);
-extern void copy_to_absolute_zero(void *dest, void *src, size_t count);
-extern int copy_to_user_real(void __user *dest, void *src, size_t count);
-extern int copy_from_user_real(void *dest, void __user *src, size_t count);
-
-#define finish_arch_switch(prev) do {					     \
-	set_fs(current->thread.mm_segment);				     \
-	account_vtime(prev, current);					     \
-} while (0)
-
-#define nop() asm volatile("nop")
-
-/*
- * Force strict CPU ordering.
- * And yes, this is required on UP too when we're talking
- * to devices.
- *
- * This is very similar to the ppc eieio/sync instruction in that is
- * does a checkpoint syncronisation & makes sure that 
- * all memory ops have completed wrt other CPU's ( see 7-15 POP  DJB ).
- */
-
-#define eieio()	asm volatile("bcr 15,0" : : : "memory")
-#define SYNC_OTHER_CORES(x)   eieio()
-#define mb()    eieio()
-#define rmb()   eieio()
-#define wmb()   eieio()
-#define read_barrier_depends() do { } while(0)
-#define smp_mb()       mb()
-#define smp_rmb()      rmb()
-#define smp_wmb()      wmb()
-#define smp_read_barrier_depends()    read_barrier_depends()
-#define smp_mb__before_clear_bit()     smp_mb()
-#define smp_mb__after_clear_bit()      smp_mb()
-
-
-#define set_mb(var, value)      do { var = value; mb(); } while (0)
-
-#ifdef __s390x__
-
-#define __ctl_load(array, low, high) ({				\
-	typedef struct { char _[sizeof(array)]; } addrtype;	\
-	asm volatile(						\
-		"	lctlg	%1,%2,%0\n"			\
-		: : "Q" (*(addrtype *)(&array)),		\
-		    "i" (low), "i" (high));			\
-	})
-
-#define __ctl_store(array, low, high) ({			\
-	typedef struct { char _[sizeof(array)]; } addrtype;	\
-	asm volatile(						\
-		"	stctg	%1,%2,%0\n"			\
-		: "=Q" (*(addrtype *)(&array))			\
-		: "i" (low), "i" (high));			\
-	})
-
-#else /* __s390x__ */
-
-#define __ctl_load(array, low, high) ({				\
-	typedef struct { char _[sizeof(array)]; } addrtype;	\
-	asm volatile(						\
-		"	lctl	%1,%2,%0\n"			\
-		: : "Q" (*(addrtype *)(&array)),		\
-		    "i" (low), "i" (high));			\
-})
-
-#define __ctl_store(array, low, high) ({			\
-	typedef struct { char _[sizeof(array)]; } addrtype;	\
-	asm volatile(						\
-		"	stctl	%1,%2,%0\n"			\
-		: "=Q" (*(addrtype *)(&array))			\
-		: "i" (low), "i" (high));			\
-	})
-
-#endif /* __s390x__ */
-
-#define __ctl_set_bit(cr, bit) ({	\
-	unsigned long __dummy;		\
-	__ctl_store(__dummy, cr, cr);	\
-	__dummy |= 1UL << (bit);	\
-	__ctl_load(__dummy, cr, cr);	\
-})
-
-#define __ctl_clear_bit(cr, bit) ({	\
-	unsigned long __dummy;		\
-	__ctl_store(__dummy, cr, cr);	\
-	__dummy &= ~(1UL << (bit));	\
-	__ctl_load(__dummy, cr, cr);	\
-})
-
-/*
- * Use to set psw mask except for the first byte which
- * won't be changed by this function.
- */
-static inline void
-__set_psw_mask(unsigned long mask)
-{
-	__load_psw_mask(mask | (arch_local_save_flags() & ~(-1UL >> 8)));
-}
-
-#define local_mcck_enable() \
-	__set_psw_mask(psw_kernel_bits | PSW_MASK_DAT | PSW_MASK_MCHECK)
-#define local_mcck_disable() \
-	__set_psw_mask(psw_kernel_bits | PSW_MASK_DAT)
-
-#ifdef CONFIG_SMP
-
-extern void smp_ctl_set_bit(int cr, int bit);
-extern void smp_ctl_clear_bit(int cr, int bit);
-#define ctl_set_bit(cr, bit) smp_ctl_set_bit(cr, bit)
-#define ctl_clear_bit(cr, bit) smp_ctl_clear_bit(cr, bit)
-
-#else
-
-#define ctl_set_bit(cr, bit) __ctl_set_bit(cr, bit)
-#define ctl_clear_bit(cr, bit) __ctl_clear_bit(cr, bit)
-
-#endif /* CONFIG_SMP */
-
-#define MAX_FACILITY_BIT (256*8)	/* stfle_fac_list has 256 bytes */
-
-/*
- * The test_facility function uses the bit odering where the MSB is bit 0.
- * That makes it easier to query facility bits with the bit number as
- * documented in the Principles of Operation.
- */
-static inline int test_facility(unsigned long nr)
-{
-	unsigned char *ptr;
-
-	if (nr >= MAX_FACILITY_BIT)
-		return 0;
-	ptr = (unsigned char *) &S390_lowcore.stfle_fac_list + (nr >> 3);
-	return (*ptr & (0x80 >> (nr & 7))) != 0;
-}
-
-/**
- * stfle - Store facility list extended
- * @stfle_fac_list: array where facility list can be stored
- * @size: size of passed in array in double words
- */
-static inline void stfle(u64 *stfle_fac_list, int size)
-{
-	unsigned long nr;
-
-	preempt_disable();
-	S390_lowcore.stfl_fac_list = 0;
-	asm volatile(
-		"	.insn s,0xb2b10000,0(0)\n" /* stfl */
-		"0:\n"
-		EX_TABLE(0b, 0b)
-		: "=m" (S390_lowcore.stfl_fac_list));
-	nr = 4; /* bytes stored by stfl */
-	memcpy(stfle_fac_list, &S390_lowcore.stfl_fac_list, 4);
-	if (S390_lowcore.stfl_fac_list & 0x01000000) {
-		/* More facility bits available with stfle */
-		register unsigned long reg0 asm("0") = size - 1;
-
-		asm volatile(".insn s,0xb2b00000,0(%1)" /* stfle */
-			     : "+d" (reg0)
-			     : "a" (stfle_fac_list)
-			     : "memory", "cc");
-		nr = (reg0 + 1) * 8; /* # bytes stored by stfle */
-	}
-	memset((char *) stfle_fac_list + nr, 0, size * 8 - nr);
-	preempt_enable();
-}
-
-static inline unsigned short stap(void)
-{
-	unsigned short cpu_address;
-
-	asm volatile("stap %0" : "=m" (cpu_address));
-	return cpu_address;
-}
-
-extern void (*_machine_restart)(char *command);
-extern void (*_machine_halt)(void);
-extern void (*_machine_power_off)(void);
-
-extern unsigned long arch_align_stack(unsigned long sp);
-
-static inline int tprot(unsigned long addr)
-{
-	int rc = -EFAULT;
-
-	asm volatile(
-		"	tprot	0(%1),0\n"
-		"0:	ipm	%0\n"
-		"	srl	%0,28\n"
-		"1:\n"
-		EX_TABLE(0b,1b)
-		: "+d" (rc) : "a" (addr) : "cc");
-	return rc;
-}
-
-#endif /* __KERNEL__ */
-
-#endif
+#include <asm/ctl_reg.h>
+#include <asm/exec.h>
+#include <asm/facility.h>
+#include <asm/switch_to.h>
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index 2b23885..8f2cada 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -16,6 +16,7 @@
  */
 #include <linux/sched.h>
 #include <linux/errno.h>
+#include <asm/ctl_reg.h>
 
 #define VERIFY_READ     0
 #define VERIFY_WRITE    1
@@ -375,4 +376,9 @@
 	return n;
 }
 
+extern int memcpy_real(void *, void *, size_t);
+extern void copy_to_absolute_zero(void *dest, void *src, size_t count);
+extern int copy_to_user_real(void __user *dest, void *src, size_t count);
+extern int copy_from_user_real(void *dest, void __user *src, size_t count);
+
 #endif /* __S390_UACCESS_H */
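
The real-memory copy prototypes move here from asm/system.h.  A
sketch, assuming memcpy_real() returns 0 on success and a negative
error on an access exception (names below are hypothetical):

	#include <asm/uaccess.h>

	static u32 peek_absolute(unsigned long addr)
	{
		u32 val = 0;

		/* copy with dynamic address translation switched off */
		memcpy_real(&val, (void *) addr, sizeof(val));
		return val;
	}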