/*
 * linux/arch/sh/kernel/sys_sh.c
 *
 * This file contains various random system calls that
 * have a non-standard calling sequence on the Linux/SuperH
 * platform.
 *
 * Taken from i386 version.
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/syscalls.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/utsname.h>
#include <linux/module.h>
#include <asm/cacheflush.h>
#include <asm/uaccess.h>
/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way Unix traditionally does this, though.
 */
asmlinkage int sys_pipe(unsigned long r4, unsigned long r5,
	unsigned long r6, unsigned long r7,
	struct pt_regs regs)
{
	int fd[2];
	int error;

	error = do_pipe(fd);
	if (!error) {
		regs.regs[1] = fd[1];
		return fd[0];
	}
	return error;
}
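/*
 * Hypothetical sketch of what a userspace pipe() stub then does with
 * this non-standard convention: the first descriptor arrives as the
 * syscall return value (r0) and the second one is picked out of r1,
 * roughly
 *
 *	fds[0] = return value;		(r0)
 *	fds[1] = value left in r1;
 *
 * rather than the kernel writing both descriptors back through a
 * user-space pointer.
 */
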
unsigned long shm_align_mask = PAGE_SIZE - 1;	/* Sane caches */

EXPORT_SYMBOL(shm_align_mask);
/*
 * To avoid cache aliases, we map the shared page with same color.
 */
#define COLOUR_ALIGN(addr, pgoff)				\
	((((addr) + shm_align_mask) & ~shm_align_mask) +	\
	 (((pgoff) << PAGE_SHIFT) & shm_align_mask))
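/*
 * Worked example (hypothetical values): suppose shm_align_mask has been
 * set to 0x3fff (a 16KB aliasing boundary) and we are asked for
 * addr = 0x29001234, pgoff = 3 (file offset 0x3000 with 4KB pages):
 *
 *	rounded = (0x29001234 + 0x3fff) & ~0x3fff	= 0x29004000
 *	colour  = (3 << PAGE_SHIFT) & 0x3fff		= 0x00003000
 *	result  = rounded + colour			= 0x29007000
 *
 * The chosen address shares its low aliasing bits with the file offset
 * being mapped, so every mapping of that page lands in the same cache
 * colour and virtual aliases cannot go stale.
 */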
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
	unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr;
	int do_colour_align;

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) && (addr & shm_align_mask))
			return -EINVAL;
		return addr;
	}

	if (unlikely(len > TASK_SIZE))
		return -ENOMEM;

	do_colour_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_colour_align = 1;

	if (addr) {
		if (do_colour_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	if (len > mm->cached_hole_size) {
		start_addr = addr = mm->free_area_cache;
	} else {
		mm->cached_hole_size = 0;
		start_addr = addr = TASK_UNMAPPED_BASE;
	}

full_search:
	if (do_colour_align)
		addr = COLOUR_ALIGN(addr, pgoff);
	else
		addr = PAGE_ALIGN(mm->free_area_cache);

	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point:  (!vma || addr < vma->vm_end). */
		if (unlikely(TASK_SIZE - len < addr)) {
			/*
			 * Start a new search - just in case we missed
			 * some holes.
			 */
			if (start_addr != TASK_UNMAPPED_BASE) {
				start_addr = addr = TASK_UNMAPPED_BASE;
				mm->cached_hole_size = 0;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (likely(!vma || addr + len <= vma->vm_start)) {
			/*
			 * Remember the place where we stopped the search:
			 */
			mm->free_area_cache = addr + len;
			return addr;
		}
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;

		addr = vma->vm_end;
		if (do_colour_align)
			addr = COLOUR_ALIGN(addr, pgoff);
	}
}
static inline long
do_mmap2(unsigned long addr, unsigned long len, unsigned long prot,
	 unsigned long flags, int fd, unsigned long pgoff)
{
	int error = -EBADF;
	struct file *file = NULL;

	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
	if (!(flags & MAP_ANONYMOUS)) {
		file = fget(fd);
		if (!file)
			goto out;
	}

	down_write(&current->mm->mmap_sem);
	error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
	up_write(&current->mm->mmap_sem);

	if (file)
		fput(file);
out:
	return error;
}
asmlinkage int old_mmap(unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags,
	int fd, unsigned long off)
{
	if (off & ~PAGE_MASK)
		return -EINVAL;
	return do_mmap2(addr, len, prot, flags, fd, off>>PAGE_SHIFT);
}
asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags,
	unsigned long fd, unsigned long pgoff)
{
	return do_mmap2(addr, len, prot, flags, fd, pgoff);
}
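/*
 * The two entry points differ only in the unit of the last argument:
 * old_mmap() takes a byte offset that must already be page aligned,
 * sys_mmap2() takes an offset in pages.  As a hypothetical example with
 * 4KB pages, a mapping at file offset 6GB (0x180000000) cannot be
 * expressed in old_mmap()'s 32-bit 'off' at all, while sys_mmap2()
 * simply passes pgoff = 0x180000.
 */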
/*
 * sys_ipc() is the de-multiplexer for the SysV IPC calls..
 *
 * This is really horribly ugly.
 */
asmlinkage int sys_ipc(uint call, int first, int second,
		       int third, void __user *ptr, long fifth)
{
	int version, ret;

	version = call >> 16; /* hack for backward compatibility */
	call &= 0xffff;

	if (call <= SEMCTL)
		switch (call) {
		case SEMOP:
			return sys_semtimedop(first, (struct sembuf __user *)ptr,
					      second, NULL);
		case SEMTIMEDOP:
			return sys_semtimedop(first, (struct sembuf __user *)ptr,
					      second,
					      (const struct timespec __user *)fifth);
		case SEMGET:
			return sys_semget (first, second, third);
		case SEMCTL: {
			union semun fourth;
			if (!ptr)
				return -EINVAL;
			if (get_user(fourth.__pad, (void * __user *) ptr))
				return -EFAULT;
			return sys_semctl (first, second, third, fourth);
			}
		default:
			return -EINVAL;
		}

	if (call <= MSGCTL)
		switch (call) {
		case MSGSND:
			return sys_msgsnd (first, (struct msgbuf __user *) ptr,
					   second, third);
		case MSGRCV:
			switch (version) {
			case 0: {
				struct ipc_kludge tmp;
				if (!ptr)
					return -EINVAL;

				if (copy_from_user(&tmp,
						   (struct ipc_kludge __user *) ptr,
						   sizeof (tmp)))
					return -EFAULT;
				return sys_msgrcv (first, tmp.msgp, second,
						   tmp.msgtyp, third);
				}
			default:
				return sys_msgrcv (first,
						   (struct msgbuf __user *) ptr,
						   second, fifth, third);
			}
		case MSGGET:
			return sys_msgget ((key_t) first, second);
		case MSGCTL:
			return sys_msgctl (first, second,
					   (struct msqid_ds __user *) ptr);
		default:
			return -EINVAL;
		}
	if (call <= SHMCTL)
		switch (call) {
		case SHMAT:
			switch (version) {
			default: {
				ulong raddr;
				ret = do_shmat (first, (char __user *) ptr,
						second, &raddr);
				if (ret)
					return ret;
				return put_user (raddr, (ulong __user *) third);
			}
			case 1:	/* iBCS2 emulator entry point */
				if (!segment_eq(get_fs(), get_ds()))
					return -EINVAL;
				return do_shmat (first, (char __user *) ptr,
						 second, (ulong *) third);
			}
		case SHMDT:
			return sys_shmdt ((char __user *)ptr);
		case SHMGET:
			return sys_shmget (first, second, third);
		case SHMCTL:
			return sys_shmctl (first, second,
					   (struct shmid_ds __user *) ptr);
		default:
			return -EINVAL;
		}

	return -EINVAL;
}
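/*
 * The "version" packed into the top 16 bits of 'call' selects between
 * entry-point variants.  Hypothetical example: a caller passing
 * call = (1 << 16) | SHMAT takes the iBCS2 path above, which attaches
 * the segment at the address given by 'third', while a plain
 * call = SHMAT takes the default path that writes the attach address
 * back to userspace through the 'third' pointer instead.
 */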
asmlinkage int sys_uname(struct old_utsname * name)
{
	int err;
	if (!name)
		return -EFAULT;
	down_read(&uts_sem);
	err = copy_to_user(name, utsname(), sizeof (*name));
	up_read(&uts_sem);
	return err?-EFAULT:0;
}
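/*
 * The wrappers below presumably exist because the 64-bit 'pos' argument
 * must sit in an aligned register pair under the SuperH syscall ABI;
 * the otherwise unused 'dummy' argument pads the argument list so that
 * 'pos' lands on that boundary before the generic syscall is called.
 */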
asmlinkage ssize_t sys_pread_wrapper(unsigned int fd, char * buf,
			     size_t count, long dummy, loff_t pos)
{
	return sys_pread64(fd, buf, count, pos);
}

asmlinkage ssize_t sys_pwrite_wrapper(unsigned int fd, const char * buf,
			      size_t count, long dummy, loff_t pos)
{
	return sys_pwrite64(fd, buf, count, pos);
}
asmlinkage int sys_fadvise64_64_wrapper(int fd, u32 offset0, u32 offset1,
				u32 len0, u32 len1, int advice)
{
#ifdef __LITTLE_ENDIAN__
	return sys_fadvise64_64(fd, (u64)offset1 << 32 | offset0,
				(u64)len1 << 32 | len0, advice);
#else
	return sys_fadvise64_64(fd, (u64)offset0 << 32 | offset1,
				(u64)len0 << 32 | len1, advice);
#endif
}
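/*
 * Worked example (hypothetical values): for offset = 0x0000000280001000
 * split across two 32-bit registers, a little-endian caller passes
 * offset0 = 0x80001000 (low word) and offset1 = 0x00000002 (high word),
 * so (u64)offset1 << 32 | offset0 reassembles the original value; on a
 * big-endian kernel the halves arrive in the opposite order, hence the
 * #ifdef above.
 */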