/*
 *  include/asm-s390/uaccess.h
 *
 *  S390 version
 *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Hartmut Penner (hp@de.ibm.com),
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/uaccess.h"
 */
#ifndef __S390_UACCESS_H
#define __S390_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/sched.h>
#include <linux/errno.h>

#define VERIFY_READ     0
#define VERIFY_WRITE    1


/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed, and
 * with get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(a)  ((mm_segment_t) { (a) })


#define KERNEL_DS       MAKE_MM_SEG(0)
#define USER_DS         MAKE_MM_SEG(1)

#define get_ds()        (KERNEL_DS)
#define get_fs()        (current->thread.mm_segment)

#define set_fs(x) \
({									\
	unsigned long __pto;						\
	current->thread.mm_segment = (x);				\
	__pto = current->thread.mm_segment.ar4 ?			\
		S390_lowcore.user_asce : S390_lowcore.kernel_asce;	\
	__ctl_load(__pto, 7, 7);					\
})

#define segment_eq(a,b) ((a).ar4 == (b).ar4)

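/*
 * Illustrative sketch (not part of the original header): the classic
 * pattern for letting kernel code pass a kernel buffer through a
 * uaccess-based interface is to switch the segment override with
 * set_fs() and restore it afterwards.  The called function below is
 * hypothetical.
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	err = some_uaccess_based_interface(kernel_buf, len);
 *	set_fs(old_fs);
 */
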

static inline int __access_ok(const void __user *addr, unsigned long size)
{
	return 1;
}
#define access_ok(type,addr,size) __access_ok(addr,size)

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry
{
        unsigned long insn, fixup;
};

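/*
 * Illustrative sketch (an assumption about how the table is consumed,
 * not code from this header): when a uaccess routine faults, the fault
 * handler looks up the faulting instruction address and, if an entry
 * exists, resumes execution at its fixup address, roughly:
 *
 *	const struct exception_table_entry *fixup;
 *
 *	fixup = search_exception_tables(regs->psw.addr);
 *	if (fixup)
 *		regs->psw.addr = fixup->fixup;
 */
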
struct uaccess_ops {
	size_t (*copy_from_user)(size_t, const void __user *, void *);
	size_t (*copy_from_user_small)(size_t, const void __user *, void *);
	size_t (*copy_to_user)(size_t, void __user *, const void *);
	size_t (*copy_to_user_small)(size_t, void __user *, const void *);
	size_t (*copy_in_user)(size_t, void __user *, const void __user *);
	size_t (*clear_user)(size_t, void __user *);
	size_t (*strnlen_user)(size_t, const char __user *);
	size_t (*strncpy_from_user)(size_t, const char __user *, char *);
	int (*futex_atomic_op)(int op, int __user *, int oparg, int *old);
	int (*futex_atomic_cmpxchg)(int __user *, int old, int new);
};

extern struct uaccess_ops uaccess;
extern struct uaccess_ops uaccess_std;
extern struct uaccess_ops uaccess_mvcos;
extern struct uaccess_ops uaccess_mvcos_switch;
extern struct uaccess_ops uaccess_pt;

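/*
 * Illustrative sketch (an assumption, not code from this header): the
 * architecture setup code points the global "uaccess" ops at one of the
 * available backends at boot, roughly along the lines of
 *
 *	if (MACHINE_HAS_MVCOS)
 *		uaccess = uaccess_mvcos;
 *	else
 *		uaccess = uaccess_std;
 *
 * so every helper below dispatches through the selected implementation.
 */
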
extern int __handle_fault(unsigned long, unsigned long, int);

static inline int __put_user_fn(size_t size, void __user *ptr, void *x)
{
	size = uaccess.copy_to_user_small(size, ptr, x);
	return size ? -EFAULT : size;
}

static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
{
	size = uaccess.copy_from_user_small(size, ptr, x);
	return size ? -EFAULT : size;
}

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 */
#define __put_user(x, ptr) \
({								\
	__typeof__(*(ptr)) __x = (x);				\
	int __pu_err = -EFAULT;					\
	__chk_user_ptr(ptr);					\
	switch (sizeof (*(ptr))) {				\
	case 1:							\
	case 2:							\
	case 4:							\
	case 8:							\
		__pu_err = __put_user_fn(sizeof (*(ptr)),	\
					 ptr, &__x);		\
		break;						\
	default:						\
		__put_user_bad();				\
		break;						\
	}							\
	__pu_err;						\
})

#define put_user(x, ptr)					\
({								\
	might_fault();						\
	__put_user(x, ptr);					\
})

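/*
 * Illustrative example (hypothetical names, not part of this header):
 * writing a single value back to a user-supplied pointer.
 *
 *	int __user *uptr;
 *	int value = 42;
 *
 *	if (put_user(value, uptr))
 *		return -EFAULT;
 */
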

extern int __put_user_bad(void) __attribute__((noreturn));

#define __get_user(x, ptr)					\
({								\
	int __gu_err = -EFAULT;					\
	__chk_user_ptr(ptr);					\
	switch (sizeof(*(ptr))) {				\
	case 1: {						\
		unsigned char __x;				\
		__gu_err = __get_user_fn(sizeof (*(ptr)),	\
					 ptr, &__x);		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	case 2: {						\
		unsigned short __x;				\
		__gu_err = __get_user_fn(sizeof (*(ptr)),	\
					 ptr, &__x);		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	case 4: {						\
		unsigned int __x;				\
		__gu_err = __get_user_fn(sizeof (*(ptr)),	\
					 ptr, &__x);		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	case 8: {						\
		unsigned long long __x;				\
		__gu_err = __get_user_fn(sizeof (*(ptr)),	\
					 ptr, &__x);		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	default:						\
		__get_user_bad();				\
		break;						\
	}							\
	__gu_err;						\
})

#define get_user(x, ptr)					\
({								\
	might_fault();						\
	__get_user(x, ptr);					\
})

extern int __get_user_bad(void) __attribute__((noreturn));

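/*
 * Illustrative example (hypothetical names, not part of this header):
 * reading a single value from a user-supplied pointer.
 *
 *	int __user *uptr;
 *	int value;
 *
 *	if (get_user(value, uptr))
 *		return -EFAULT;
 */
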
#define __put_user_unaligned __put_user
#define __get_user_unaligned __get_user

/**
 * __copy_to_user: - Copy a block of data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
static inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
	if (__builtin_constant_p(n) && (n <= 256))
		return uaccess.copy_to_user_small(n, to, from);
	else
		return uaccess.copy_to_user(n, to, from);
}

#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

/**
 * copy_to_user: - Copy a block of data into user space.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from kernel space to user space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
static inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	if (access_ok(VERIFY_WRITE, to, n))
		n = __copy_to_user(to, from, n);
	return n;
}

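/*
 * Illustrative example (hypothetical names, not part of this header):
 * copying a kernel structure out to a user buffer, e.g. in a read() or
 * ioctl() handler.
 *
 *	struct my_info info;
 *
 *	if (copy_to_user(ubuf, &info, sizeof(info)))
 *		return -EFAULT;
 */
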
/**
 * __copy_from_user: - Copy a block of data from user space, with less checking.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from user space to kernel space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
static inline unsigned long __must_check
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
	if (__builtin_constant_p(n) && (n <= 256))
		return uaccess.copy_from_user_small(n, from, to);
	else
		return uaccess.copy_from_user(n, from, to);
}

extern void copy_from_user_overflow(void)
#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
__compiletime_warning("copy_from_user() buffer size is not provably correct")
#endif
;

/**
 * copy_from_user: - Copy a block of data from user space.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from user space to kernel space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
static inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned int sz = __compiletime_object_size(to);

	might_fault();
	if (unlikely(sz != -1 && sz < n)) {
		copy_from_user_overflow();
		return n;
	}
	if (access_ok(VERIFY_READ, from, n))
		n = __copy_from_user(to, from, n);
	else
		memset(to, 0, n);
	return n;
}

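/*
 * Illustrative example (hypothetical names, not part of this header):
 * copying a structure in from a user buffer, e.g. in a write() or
 * ioctl() handler.
 *
 *	struct my_args args;
 *
 *	if (copy_from_user(&args, ubuf, sizeof(args)))
 *		return -EFAULT;
 */
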
static inline unsigned long __must_check
__copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	return uaccess.copy_in_user(n, to, from);
}

static inline unsigned long __must_check
copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	might_fault();
	if (__access_ok(from,n) && __access_ok(to,n))
		n = __copy_in_user(to, from, n);
	return n;
}

/*
 * Copy a null terminated string from userspace.
 */
static inline long __must_check
strncpy_from_user(char *dst, const char __user *src, long count)
{
	long res = -EFAULT;
	might_fault();
	if (access_ok(VERIFY_READ, src, 1))
		res = uaccess.strncpy_from_user(count, src, dst);
	return res;
}

static inline unsigned long
strnlen_user(const char __user * src, unsigned long n)
{
	might_fault();
	return uaccess.strnlen_user(n, src);
}

/**
 * strlen_user: - Get the size of a string in user space.
 * @str: The string to measure.
 *
 * Context: User context only.  This function may sleep.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 *
 * If there is a limit on the length of a valid string, you may wish to
 * consider using strnlen_user() instead.
 */
#define strlen_user(str) strnlen_user(str, ~0UL)

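/*
 * Illustrative example (hypothetical names, not part of this header):
 * fetching a bounded, NUL-terminated string from user space.
 *
 *	char name[32];
 *	long len;
 *
 *	len = strncpy_from_user(name, uname, sizeof(name));
 *	if (len < 0)
 *		return len;
 */
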
/*
 * Zero Userspace
 */

static inline unsigned long __must_check
__clear_user(void __user *to, unsigned long n)
{
	return uaccess.clear_user(n, to);
}

static inline unsigned long __must_check
clear_user(void __user *to, unsigned long n)
{
	might_fault();
	if (access_ok(VERIFY_WRITE, to, n))
		n = uaccess.clear_user(n, to);
	return n;
}

#endif /* __S390_UACCESS_H */