GNU Linux-libre 4.14.266-gnu1
[releases.git] / ipc / shm.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * linux/ipc/shm.c
4  * Copyright (C) 1992, 1993 Krishna Balasubramanian
5  *       Many improvements/fixes by Bruno Haible.
6  * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
7  * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
8  *
9  * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
10  * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
11  * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
12  * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
13  * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
14  * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
15  * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
16  *
17  * support for audit of ipc object properties and permission changes
18  * Dustin Kirkland <dustin.kirkland@us.ibm.com>
19  *
20  * namespaces support
21  * OpenVZ, SWsoft Inc.
22  * Pavel Emelianov <xemul@openvz.org>
23  *
24  * Better ipc lock (kern_ipc_perm.lock) handling
25  * Davidlohr Bueso <davidlohr.bueso@hp.com>, June 2013.
26  */
27
28 #include <linux/slab.h>
29 #include <linux/mm.h>
30 #include <linux/hugetlb.h>
31 #include <linux/shm.h>
32 #include <linux/init.h>
33 #include <linux/file.h>
34 #include <linux/mman.h>
35 #include <linux/shmem_fs.h>
36 #include <linux/security.h>
37 #include <linux/syscalls.h>
38 #include <linux/audit.h>
39 #include <linux/capability.h>
40 #include <linux/ptrace.h>
41 #include <linux/seq_file.h>
42 #include <linux/rwsem.h>
43 #include <linux/nsproxy.h>
44 #include <linux/mount.h>
45 #include <linux/ipc_namespace.h>
46
47 #include <linux/uaccess.h>
48
49 #include "util.h"
50
/*
 * Per-attach state, hung off the wrapper file's ->private_data
 * (see the shm_file_data() accessor macro below).
 */
struct shm_file_data {
	int id;				/* ipc id of the attached segment */
	struct ipc_namespace *ns;	/* namespace the segment lives in */
	struct file *file;		/* backing shmem/hugetlbfs file */
	const struct vm_operations_struct *vm_ops; /* backing file's vm_ops */
};

#define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data))

static const struct file_operations shm_file_operations;
static const struct vm_operations_struct shm_vm_ops;

/* The SysV shm id table of a given ipc namespace. */
#define shm_ids(ns)	((ns)->ids[IPC_SHM_IDS])

#define shm_unlock(shp)			\
	ipc_unlock(&(shp)->shm_perm)

static int newseg(struct ipc_namespace *, struct ipc_params *);
static void shm_open(struct vm_area_struct *vma);
static void shm_close(struct vm_area_struct *vma);
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
#endif
75
/*
 * Initialize the SysV shm state of an ipc namespace with the default
 * limits and an empty id table.  Returns the result of ipc_init_ids().
 */
int shm_init_ns(struct ipc_namespace *ns)
{
	ns->shm_ctlmax = SHMMAX;	/* per-segment size limit (newseg) */
	ns->shm_ctlall = SHMALL;	/* total page limit (newseg) */
	ns->shm_ctlmni = SHMMNI;	/* id limit passed to ipc_addid() */
	ns->shm_rmid_forced = 0;
	ns->shm_tot = 0;		/* pages currently accounted */
	return ipc_init_ids(&shm_ids(ns));
}
85
/*
 * Called with shm_ids.rwsem (writer) and the shp structure locked.
 * Only shm_ids.rwsem remains locked on exit.
 */
static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
	struct shmid_kernel *shp;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
	WARN_ON(ns != shp->ns);

	if (shp->shm_nattch) {
		/*
		 * Still attached somewhere: mark it for destruction on the
		 * last detach and hide the key so shmget() can't find it.
		 */
		shp->shm_perm.mode |= SHM_DEST;
		/* Do not find it any more */
		ipc_set_key_private(&shm_ids(ns), &shp->shm_perm);
		shm_unlock(shp);
	} else
		shm_destroy(ns, shp);
}
105
#ifdef CONFIG_IPC_NS
/* Tear down every shm segment of a dying ipc namespace, then its id table. */
void shm_exit_ns(struct ipc_namespace *ns)
{
	free_ipcs(ns, &shm_ids(ns), do_shm_rmid);
	idr_destroy(&ns->ids[IPC_SHM_IDS].ipcs_idr);
	rhashtable_destroy(&ns->ids[IPC_SHM_IDS].key_ht);
}
#endif
114
/* Boot-time setup of the initial ipc namespace's shm state. */
static int __init ipc_ns_init(void)
{
	const int err = shm_init_ns(&init_ipc_ns);
	/* A failure here leaves the initial namespace without SysV shm. */
	WARN(err, "ipc: sysv shm_init_ns failed: %d\n", err);
	return err;
}

pure_initcall(ipc_ns_init);
123
/*
 * Register the /proc/sysvipc/shm interface.  The header layout differs
 * by word size because the 64-bit fields need wider columns.
 */
void __init shm_init(void)
{
	ipc_init_proc_interface("sysvipc/shm",
#if BITS_PER_LONG <= 32
				"       key      shmid perms       size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime        rss       swap\n",
#else
				"       key      shmid perms                  size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime                   rss                  swap\n",
#endif
				IPC_SHM_IDS, sysvipc_shm_proc_show);
}
134
135 static inline struct shmid_kernel *shm_obtain_object(struct ipc_namespace *ns, int id)
136 {
137         struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&shm_ids(ns), id);
138
139         if (IS_ERR(ipcp))
140                 return ERR_CAST(ipcp);
141
142         return container_of(ipcp, struct shmid_kernel, shm_perm);
143 }
144
145 static inline struct shmid_kernel *shm_obtain_object_check(struct ipc_namespace *ns, int id)
146 {
147         struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&shm_ids(ns), id);
148
149         if (IS_ERR(ipcp))
150                 return ERR_CAST(ipcp);
151
152         return container_of(ipcp, struct shmid_kernel, shm_perm);
153 }
154
/*
 * shm_lock_(check_) routines are called in the paths where the rwsem
 * is not necessarily held.
 */
static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id);

	/*
	 * Callers of shm_lock() must validate the status of the returned ipc
	 * object pointer (as returned by ipc_lock()), and error out as
	 * appropriate.
	 */
	if (IS_ERR(ipcp))
		return (void *)ipcp;
	return container_of(ipcp, struct shmid_kernel, shm_perm);
}
172
/*
 * Lock a segment we already hold a pointer to.  Also enters an RCU read
 * section; callers pair this with shm_unlock() (which drops both).
 */
static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp)
{
	rcu_read_lock();
	ipc_lock_object(&ipcp->shm_perm);
}
178
179 static void shm_rcu_free(struct rcu_head *head)
180 {
181         struct kern_ipc_perm *ptr = container_of(head, struct kern_ipc_perm,
182                                                         rcu);
183         struct shmid_kernel *shp = container_of(ptr, struct shmid_kernel,
184                                                         shm_perm);
185         security_shm_free(shp);
186         kvfree(shp);
187 }
188
/*
 * It has to be called with shp locked.
 * It must be called before ipc_rmid()
 */
static inline void shm_clist_rm(struct shmid_kernel *shp)
{
	struct task_struct *creator;

	/* ensure that shm_creator does not disappear */
	rcu_read_lock();

	/*
	 * A concurrent exit_shm may do a list_del_init() as well.
	 * Just do nothing if exit_shm already did the work
	 */
	if (!list_empty(&shp->shm_clist)) {
		/*
		 * shp->shm_creator is guaranteed to be valid *only*
		 * if shp->shm_clist is not empty.
		 */
		creator = shp->shm_creator;

		task_lock(creator);
		/*
		 * list_del_init() is a nop if the entry was already removed
		 * from the list.
		 */
		list_del_init(&shp->shm_clist);
		task_unlock(creator);
	}
	rcu_read_unlock();
}
221
/* Unlink @s from its creator's clist and remove it from the id table. */
static inline void shm_rmid(struct shmid_kernel *s)
{
	/* shm_clist_rm() must run before ipc_rmid() (see its comment). */
	shm_clist_rm(s);
	ipc_rmid(&shm_ids(s->ns), &s->shm_perm);
}
227
228
/*
 * Account a new attach of the segment backing @vma: bump shm_nattch and
 * stamp atime/lpid.  Returns 0 on success, the shm_lock() error if the
 * lookup failed, or -EINVAL if the id was recycled for another segment.
 */
static int __shm_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;

	shp = shm_lock(sfd->ns, sfd->id);

	if (IS_ERR(shp))
		return PTR_ERR(shp);

	if (shp->shm_file != sfd->file) {
		/* ID was reused */
		shm_unlock(shp);
		return -EINVAL;
	}

	shp->shm_atim = ktime_get_real_seconds();
	shp->shm_lprid = task_tgid_vnr(current);
	shp->shm_nattch++;
	shm_unlock(shp);
	return 0;
}
252
/* This is called by fork, once for every shm attach. */
static void shm_open(struct vm_area_struct *vma)
{
	int err = __shm_open(vma);
	/*
	 * We raced in the idr lookup or with shm_destroy().
	 * Either way, the ID is busted.
	 */
	WARN_ON_ONCE(err);
}
263
/*
 * shm_destroy - free the struct shmid_kernel
 *
 * @ns: namespace
 * @shp: struct to free
 *
 * It has to be called with shp and shm_ids.rwsem (writer) locked,
 * but returns with shp unlocked and freed.
 */
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
	struct file *shm_file;

	shm_file = shp->shm_file;
	/* NULL marks the segment dead; __shm_open() then sees a mismatch. */
	shp->shm_file = NULL;
	ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
	shm_rmid(shp);
	shm_unlock(shp);
	/* Undo any SHM_LOCK accounting before dropping the file. */
	if (!is_file_hugepages(shm_file))
		shmem_lock(shm_file, 0, shp->mlock_user);
	else if (shp->mlock_user)
		user_shm_unlock(i_size_read(file_inode(shm_file)),
				shp->mlock_user);
	fput(shm_file);
	ipc_rcu_putref(&shp->shm_perm, shm_rcu_free);
}
290
291 /*
292  * shm_may_destroy - identifies whether shm segment should be destroyed now
293  *
294  * Returns true if and only if there are no active users of the segment and
295  * one of the following is true:
296  *
297  * 1) shmctl(id, IPC_RMID, NULL) was called for this shp
298  *
299  * 2) sysctl kernel.shm_rmid_forced is set to 1.
300  */
301 static bool shm_may_destroy(struct shmid_kernel *shp)
302 {
303         return (shp->shm_nattch == 0) &&
304                (shp->ns->shm_rmid_forced ||
305                 (shp->shm_perm.mode & SHM_DEST));
306 }
307
/*
 * remove the attach descriptor vma.
 * free memory for segment if it is marked destroyed.
 * The descriptor has already been removed from the current->mm->mmap list
 * and will later be kfree()d.
 */
static void shm_close(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;
	struct ipc_namespace *ns = sfd->ns;

	/* Writer lock: shm_destroy() requires shm_ids.rwsem held as writer. */
	down_write(&shm_ids(ns).rwsem);
	/* remove from the list of attaches of the shm segment */
	shp = shm_lock(ns, sfd->id);

	/*
	 * We raced in the idr lookup or with shm_destroy().
	 * Either way, the ID is busted.
	 */
	if (WARN_ON_ONCE(IS_ERR(shp)))
		goto done; /* no-op */

	shp->shm_lprid = task_tgid_vnr(current);
	shp->shm_dtim = ktime_get_real_seconds();
	shp->shm_nattch--;
	if (shm_may_destroy(shp))
		shm_destroy(ns, shp);
	else
		shm_unlock(shp);
done:
	up_write(&shm_ids(ns).rwsem);
}
342
/* Called with ns->shm_ids(ns).rwsem locked */
static int shm_try_destroy_orphaned(int id, void *p, void *data)
{
	struct ipc_namespace *ns = data;
	struct kern_ipc_perm *ipcp = p;
	struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	/*
	 * We want to destroy segments without users and with already
	 * exit'ed originating process.
	 *
	 * As shp->* are changed under rwsem, it's safe to skip shp locking.
	 */
	if (!list_empty(&shp->shm_clist))
		return 0;

	if (shm_may_destroy(shp)) {
		shm_lock_by_ptr(shp);
		shm_destroy(ns, shp);
	}
	/* Always 0: keep idr_for_each() iterating over the remaining ids. */
	return 0;
}
365
/*
 * Destroy every orphaned, unattached segment in @ns.  Called when the
 * shm_rmid_forced sysctl is enabled after segments were orphaned.
 */
void shm_destroy_orphaned(struct ipc_namespace *ns)
{
	down_write(&shm_ids(ns).rwsem);
	if (shm_ids(ns).in_use)
		idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_orphaned, ns);
	up_write(&shm_ids(ns).rwsem);
}
373
/* Locking assumes this will only be called with task == current */
void exit_shm(struct task_struct *task)
{
	for (;;) {
		struct shmid_kernel *shp;
		struct ipc_namespace *ns;

		task_lock(task);

		if (list_empty(&task->sysvshm.shm_clist)) {
			task_unlock(task);
			break;
		}

		shp = list_first_entry(&task->sysvshm.shm_clist, struct shmid_kernel,
				shm_clist);

		/*
		 * 1) Get pointer to the ipc namespace. It is worth to say
		 * that this pointer is guaranteed to be valid because
		 * shp lifetime is always shorter than namespace lifetime
		 * in which shp lives.
		 * We have taken task_lock(), which means that shp won't
		 * be freed underneath us.
		 */
		ns = shp->ns;

		/*
		 * 2) If kernel.shm_rmid_forced is not set then only keep track of
		 * which shmids are orphaned, so that a later set of the sysctl
		 * can clean them up.
		 */
		if (!ns->shm_rmid_forced)
			goto unlink_continue;

		/*
		 * 3) get a reference to the namespace.
		 *    The refcount could be already 0. If it is 0, then
		 *    the shm objects will be free by free_ipc_work().
		 */
		ns = get_ipc_ns_not_zero(ns);
		if (!ns) {
unlink_continue:
			list_del_init(&shp->shm_clist);
			task_unlock(task);
			continue;
		}

		/*
		 * 4) get a reference to shp.
		 *   This cannot fail: shm_clist_rm() is called before
		 *   ipc_rmid(), thus the refcount cannot be 0.
		 */
		WARN_ON(!ipc_rcu_getref(&shp->shm_perm));

		/*
		 * 5) unlink the shm segment from the list of segments
		 *    created by current.
		 *    This must be done last. After unlinking,
		 *    only the refcounts obtained above prevent IPC_RMID
		 *    from destroying the segment or the namespace.
		 */
		list_del_init(&shp->shm_clist);

		task_unlock(task);

		/*
		 * 6) we have all references
		 *    Thus lock & if needed destroy shp.
		 */
		down_write(&shm_ids(ns).rwsem);
		shm_lock_by_ptr(shp);
		/*
		 * rcu_read_lock was implicitly taken in shm_lock_by_ptr, it's
		 * safe to call ipc_rcu_putref here
		 */
		ipc_rcu_putref(&shp->shm_perm, shm_rcu_free);

		if (ipc_valid_object(&shp->shm_perm)) {
			if (shm_may_destroy(shp))
				shm_destroy(ns, shp);
			else
				shm_unlock(shp);
		} else {
			/*
			 * Someone else deleted the shp from namespace
			 * idr/kht while we have waited.
			 * Just unlock and continue.
			 */
			shm_unlock(shp);
		}

		up_write(&shm_ids(ns).rwsem);
		put_ipc_ns(ns); /* paired with get_ipc_ns_not_zero */
	}
}
469
470 static int shm_fault(struct vm_fault *vmf)
471 {
472         struct file *file = vmf->vma->vm_file;
473         struct shm_file_data *sfd = shm_file_data(file);
474
475         return sfd->vm_ops->fault(vmf);
476 }
477
478 static int shm_split(struct vm_area_struct *vma, unsigned long addr)
479 {
480         struct file *file = vma->vm_file;
481         struct shm_file_data *sfd = shm_file_data(file);
482
483         if (sfd->vm_ops && sfd->vm_ops->split)
484                 return sfd->vm_ops->split(vma, addr);
485
486         return 0;
487 }
488
#ifdef CONFIG_NUMA
/* Forward NUMA policy changes to the backing file's ->set_policy, if any. */
static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	int err = 0;

	if (sfd->vm_ops->set_policy)
		err = sfd->vm_ops->set_policy(vma, new);
	return err;
}

/*
 * Report the NUMA policy for @addr: the backing file's ->get_policy if
 * present, otherwise the vma's own policy (may be NULL).
 */
static struct mempolicy *shm_get_policy(struct vm_area_struct *vma,
					unsigned long addr)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct mempolicy *pol = NULL;

	if (sfd->vm_ops->get_policy)
		pol = sfd->vm_ops->get_policy(vma, addr);
	else if (vma->vm_policy)
		pol = vma->vm_policy;

	return pol;
}
#endif
516
/*
 * mmap of the shm wrapper file: account the attach, let the backing file
 * set up the mapping, then interpose shm_vm_ops (keeping the backing
 * vm_ops in sfd->vm_ops for delegation).
 */
static int shm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct shm_file_data *sfd = shm_file_data(file);
	int ret;

	/*
	 * In case of remap_file_pages() emulation, the file can represent an
	 * IPC ID that was removed, and possibly even reused by another shm
	 * segment already.  Propagate this case as an error to caller.
	 */
	ret = __shm_open(vma);
	if (ret)
		return ret;

	ret = call_mmap(sfd->file, vma);
	if (ret) {
		/* Undo the attach accounting done by __shm_open(). */
		shm_close(vma);
		return ret;
	}
	sfd->vm_ops = vma->vm_ops;
#ifdef CONFIG_MMU
	WARN_ON(!sfd->vm_ops->fault);
#endif
	vma->vm_ops = &shm_vm_ops;
	return 0;
}
543
544 static int shm_release(struct inode *ino, struct file *file)
545 {
546         struct shm_file_data *sfd = shm_file_data(file);
547
548         put_ipc_ns(sfd->ns);
549         fput(sfd->file);
550         shm_file_data(file) = NULL;
551         kfree(sfd);
552         return 0;
553 }
554
555 static int shm_fsync(struct file *file, loff_t start, loff_t end, int datasync)
556 {
557         struct shm_file_data *sfd = shm_file_data(file);
558
559         if (!sfd->file->f_op->fsync)
560                 return -EINVAL;
561         return sfd->file->f_op->fsync(sfd->file, start, end, datasync);
562 }
563
564 static long shm_fallocate(struct file *file, int mode, loff_t offset,
565                           loff_t len)
566 {
567         struct shm_file_data *sfd = shm_file_data(file);
568
569         if (!sfd->file->f_op->fallocate)
570                 return -EOPNOTSUPP;
571         return sfd->file->f_op->fallocate(file, mode, offset, len);
572 }
573
574 static unsigned long shm_get_unmapped_area(struct file *file,
575         unsigned long addr, unsigned long len, unsigned long pgoff,
576         unsigned long flags)
577 {
578         struct shm_file_data *sfd = shm_file_data(file);
579
580         return sfd->file->f_op->get_unmapped_area(sfd->file, addr, len,
581                                                 pgoff, flags);
582 }
583
/* f_ops for the per-attach wrapper file created at shmat() time. */
static const struct file_operations shm_file_operations = {
	.mmap		= shm_mmap,
	.fsync		= shm_fsync,
	.release	= shm_release,
	.get_unmapped_area	= shm_get_unmapped_area,
	.llseek		= noop_llseek,
	.fallocate	= shm_fallocate,
};
592
/*
 * shm_file_operations_huge is now identical to shm_file_operations,
 * but we keep it distinct for the sake of is_file_shm_hugepages().
 */
static const struct file_operations shm_file_operations_huge = {
	.mmap		= shm_mmap,
	.fsync		= shm_fsync,
	.release	= shm_release,
	.get_unmapped_area	= shm_get_unmapped_area,
	.llseek		= noop_llseek,
	.fallocate	= shm_fallocate,
};
605
/* True iff @file is a hugepage-backed shm wrapper (identified by f_op). */
bool is_file_shm_hugepages(struct file *file)
{
	return file->f_op == &shm_file_operations_huge;
}
610
/* vm_ops interposed on every shm mapping by shm_mmap(). */
static const struct vm_operations_struct shm_vm_ops = {
	.open	= shm_open,	/* callback for a new vm-area open */
	.close	= shm_close,	/* callback for when the vm-area is released */
	.fault	= shm_fault,
	.split	= shm_split,
#if defined(CONFIG_NUMA)
	.set_policy = shm_set_policy,
	.get_policy = shm_get_policy,
#endif
};
621
/**
 * newseg - Create a new shared memory segment
 * @ns: namespace
 * @params: ptr to the structure that contains key, size and shmflg
 *
 * Called with shm_ids.rwsem held as a writer.
 *
 * Returns the new segment's ipc id, or a negative errno.
 */
static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
{
	key_t key = params->key;
	int shmflg = params->flg;
	size_t size = params->u.size;
	int error;
	struct shmid_kernel *shp;
	size_t numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	struct file *file;
	char name[13];
	vm_flags_t acctflag = 0;

	if (size < SHMMIN || size > ns->shm_ctlmax)
		return -EINVAL;

	/* Rounding up to pages overflowed size_t. */
	if (numpages << PAGE_SHIFT < size)
		return -ENOSPC;

	/* Reject both namespace-limit excess and shm_tot overflow. */
	if (ns->shm_tot + numpages < ns->shm_tot ||
			ns->shm_tot + numpages > ns->shm_ctlall)
		return -ENOSPC;

	shp = kvmalloc(sizeof(*shp), GFP_KERNEL);
	if (unlikely(!shp))
		return -ENOMEM;

	shp->shm_perm.key = key;
	shp->shm_perm.mode = (shmflg & S_IRWXUGO);
	shp->mlock_user = NULL;

	shp->shm_perm.security = NULL;
	error = security_shm_alloc(shp);
	if (error) {
		kvfree(shp);
		return error;
	}

	sprintf(name, "SYSV%08x", key);
	if (shmflg & SHM_HUGETLB) {
		struct hstate *hs;
		size_t hugesize;

		hs = hstate_sizelog((shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
		if (!hs) {
			error = -EINVAL;
			goto no_file;
		}
		hugesize = ALIGN(size, huge_page_size(hs));

		/* hugetlb_file_setup applies strict accounting */
		if (shmflg & SHM_NORESERVE)
			acctflag = VM_NORESERVE;
		file = hugetlb_file_setup(name, hugesize, acctflag,
				  &shp->mlock_user, HUGETLB_SHMFS_INODE,
				(shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
	} else {
		/*
		 * Do not allow no accounting for OVERCOMMIT_NEVER, even
		 * if it's asked for.
		 */
		if  ((shmflg & SHM_NORESERVE) &&
				sysctl_overcommit_memory != OVERCOMMIT_NEVER)
			acctflag = VM_NORESERVE;
		file = shmem_kernel_file_setup(name, size, acctflag);
	}
	error = PTR_ERR(file);
	if (IS_ERR(file))
		goto no_file;

	shp->shm_cprid = task_tgid_vnr(current);
	shp->shm_lprid = 0;
	shp->shm_atim = shp->shm_dtim = 0;
	shp->shm_ctim = ktime_get_real_seconds();
	shp->shm_segsz = size;
	shp->shm_nattch = 0;
	shp->shm_file = file;
	shp->shm_creator = current;

	/* Publishes the segment; returns with shm_perm locked + RCU held. */
	error = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
	if (error < 0)
		goto no_id;

	shp->ns = ns;

	task_lock(current);
	list_add(&shp->shm_clist, &current->sysvshm.shm_clist);
	task_unlock(current);

	/*
	 * shmid gets reported as "inode#" in /proc/pid/maps.
	 * proc-ps tools use this. Changing this will break them.
	 */
	file_inode(file)->i_ino = shp->shm_perm.id;

	ns->shm_tot += numpages;
	error = shp->shm_perm.id;

	ipc_unlock_object(&shp->shm_perm);
	rcu_read_unlock();
	return error;

no_id:
	/* hugetlb_file_setup() may have charged mlock_user; undo it. */
	if (is_file_hugepages(file) && shp->mlock_user)
		user_shm_unlock(size, shp->mlock_user);
	fput(file);
no_file:
	/* Frees the security blob and shp after a grace period. */
	call_rcu(&shp->shm_perm.rcu, shm_rcu_free);
	return error;
}
738
739 /*
740  * Called with shm_ids.rwsem and ipcp locked.
741  */
742 static inline int shm_security(struct kern_ipc_perm *ipcp, int shmflg)
743 {
744         struct shmid_kernel *shp;
745
746         shp = container_of(ipcp, struct shmid_kernel, shm_perm);
747         return security_shm_associate(shp, shmflg);
748 }
749
750 /*
751  * Called with shm_ids.rwsem and ipcp locked.
752  */
753 static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
754                                 struct ipc_params *params)
755 {
756         struct shmid_kernel *shp;
757
758         shp = container_of(ipcp, struct shmid_kernel, shm_perm);
759         if (shp->shm_segsz < params->u.size)
760                 return -EINVAL;
761
762         return 0;
763 }
764
/*
 * shmget(2): find-or-create a segment for @key.  Creation, permission and
 * size checks are delegated to ipcget() via the shm_ops callbacks.
 */
SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
{
	struct ipc_namespace *ns;
	static const struct ipc_ops shm_ops = {
		.getnew = newseg,
		.associate = shm_security,
		.more_checks = shm_more_checks,
	};
	struct ipc_params shm_params;

	ns = current->nsproxy->ipc_ns;

	shm_params.key = key;
	shm_params.flg = shmflg;
	shm_params.u.size = size;

	return ipcget(ns, &shm_ids(ns), &shm_ops, &shm_params);
}
783
/*
 * Copy a shmid64_ds out to userspace in the ABI variant the caller asked
 * for: raw for IPC_64, converted to the legacy shmid_ds for IPC_OLD.
 * Returns the copy_to_user() residue (nonzero on fault) or -EINVAL.
 */
static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct shmid_ds out;

		/* Zero first so padding never leaks kernel stack data. */
		memset(&out, 0, sizeof(out));
		ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
		out.shm_segsz	= in->shm_segsz;
		out.shm_atime	= in->shm_atime;
		out.shm_dtime	= in->shm_dtime;
		out.shm_ctime	= in->shm_ctime;
		out.shm_cpid	= in->shm_cpid;
		out.shm_lpid	= in->shm_lpid;
		out.shm_nattch	= in->shm_nattch;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}
809
/*
 * Copy a shmid64_ds in from userspace, converting from the legacy
 * shmid_ds layout for IPC_OLD (only the perm fields IPC_SET uses).
 * Returns 0, -EFAULT, or -EINVAL for an unknown version.
 */
static inline unsigned long
copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version)
{
	switch (version) {
	case IPC_64:
		if (copy_from_user(out, buf, sizeof(*out)))
			return -EFAULT;
		return 0;
	case IPC_OLD:
	    {
		struct shmid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->shm_perm.uid	= tbuf_old.shm_perm.uid;
		out->shm_perm.gid	= tbuf_old.shm_perm.gid;
		out->shm_perm.mode	= tbuf_old.shm_perm.mode;

		return 0;
	    }
	default:
		return -EINVAL;
	}
}
835
/*
 * Copy shm limits out to userspace: raw shminfo64 for IPC_64, or the
 * legacy shminfo (with shmmax clamped to INT_MAX) for IPC_OLD.
 */
static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct shminfo out;

		/* The old ABI field is an int; saturate rather than truncate. */
		if (in->shmmax > INT_MAX)
			out.shmmax = INT_MAX;
		else
			out.shmmax = (int)in->shmmax;

		out.shmmin	= in->shmmin;
		out.shmmni	= in->shmmni;
		out.shmseg	= in->shmseg;
		out.shmall	= in->shmall;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}
861
/*
 * Calculate and add used RSS and swap pages of a shm.
 * Called with shm_ids.rwsem held as a reader
 */
static void shm_add_rss_swap(struct shmid_kernel *shp,
	unsigned long *rss_add, unsigned long *swp_add)
{
	struct inode *inode;

	inode = file_inode(shp->shm_file);

	if (is_file_hugepages(shp->shm_file)) {
		/* Hugepages: nrpages counts huge pages; scale to base pages. */
		struct address_space *mapping = inode->i_mapping;
		struct hstate *h = hstate_file(shp->shm_file);
		*rss_add += pages_per_huge_page(h) * mapping->nrpages;
	} else {
#ifdef CONFIG_SHMEM
		struct shmem_inode_info *info = SHMEM_I(inode);

		/* info->lock guards nrpages/swapped consistency. */
		spin_lock_irq(&info->lock);
		*rss_add += inode->i_mapping->nrpages;
		*swp_add += info->swapped;
		spin_unlock_irq(&info->lock);
#else
		/* Without CONFIG_SHMEM nothing is ever swapped out. */
		*rss_add += inode->i_mapping->nrpages;
#endif
	}
}
890
/*
 * Called with shm_ids.rwsem held as a reader
 *
 * Sum RSS and swap usage over all segments in @ns, walking the idr
 * until all in-use ids have been visited (ids may be sparse).
 */
static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
		unsigned long *swp)
{
	int next_id;
	int total, in_use;

	*rss = 0;
	*swp = 0;

	in_use = shm_ids(ns).in_use;

	for (total = 0, next_id = 0; total < in_use; next_id++) {
		struct kern_ipc_perm *ipc;
		struct shmid_kernel *shp;

		ipc = idr_find(&shm_ids(ns).ipcs_idr, next_id);
		if (ipc == NULL)
			continue;	/* unused id slot; keep scanning */
		shp = container_of(ipc, struct shmid_kernel, shm_perm);

		shm_add_rss_swap(shp, rss, swp);

		total++;
	}
}
919
/*
 * This function handles some shmctl commands which require the rwsem
 * to be held in write mode.
 * NOTE: no locks must be held, the rwsem is taken inside this function.
 *
 * Handles IPC_RMID and IPC_SET; any other @cmd yields -EINVAL.
 */
static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
		       struct shmid64_ds *shmid64)
{
	struct kern_ipc_perm *ipcp;
	struct shmid_kernel *shp;
	int err;

	down_write(&shm_ids(ns).rwsem);
	rcu_read_lock();

	/* Permission/ownership checks; returns the object un-locked. */
	ipcp = ipcctl_pre_down_nolock(ns, &shm_ids(ns), shmid, cmd,
				      &shmid64->shm_perm, 0);
	if (IS_ERR(ipcp)) {
		err = PTR_ERR(ipcp);
		goto out_unlock1;
	}

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	err = security_shm_shmctl(shp, cmd);
	if (err)
		goto out_unlock1;

	switch (cmd) {
	case IPC_RMID:
		ipc_lock_object(&shp->shm_perm);
		/* do_shm_rmid unlocks the ipc object and rcu */
		do_shm_rmid(ns, ipcp);
		goto out_up;
	case IPC_SET:
		ipc_lock_object(&shp->shm_perm);
		err = ipc_update_perm(&shmid64->shm_perm, ipcp);
		if (err)
			goto out_unlock0;
		shp->shm_ctim = ktime_get_real_seconds();
		break;
	default:
		err = -EINVAL;
		goto out_unlock1;
	}

out_unlock0:
	ipc_unlock_object(&shp->shm_perm);
out_unlock1:
	rcu_read_unlock();
out_up:
	up_write(&shm_ids(ns).rwsem);
	return err;
}
974
975 static int shmctl_ipc_info(struct ipc_namespace *ns,
976                            struct shminfo64 *shminfo)
977 {
978         int err = security_shm_shmctl(NULL, IPC_INFO);
979         if (!err) {
980                 memset(shminfo, 0, sizeof(*shminfo));
981                 shminfo->shmmni = shminfo->shmseg = ns->shm_ctlmni;
982                 shminfo->shmmax = ns->shm_ctlmax;
983                 shminfo->shmall = ns->shm_ctlall;
984                 shminfo->shmmin = SHMMIN;
985                 down_read(&shm_ids(ns).rwsem);
986                 err = ipc_get_maxid(&shm_ids(ns));
987                 up_read(&shm_ids(ns).rwsem);
988                 if (err < 0)
989                         err = 0;
990         }
991         return err;
992 }
993
994 static int shmctl_shm_info(struct ipc_namespace *ns,
995                            struct shm_info *shm_info)
996 {
997         int err = security_shm_shmctl(NULL, SHM_INFO);
998         if (!err) {
999                 memset(shm_info, 0, sizeof(*shm_info));
1000                 down_read(&shm_ids(ns).rwsem);
1001                 shm_info->used_ids = shm_ids(ns).in_use;
1002                 shm_get_stat(ns, &shm_info->shm_rss, &shm_info->shm_swp);
1003                 shm_info->shm_tot = ns->shm_tot;
1004                 shm_info->swap_attempts = 0;
1005                 shm_info->swap_successes = 0;
1006                 err = ipc_get_maxid(&shm_ids(ns));
1007                 up_read(&shm_ids(ns).rwsem);
1008                 if (err < 0)
1009                         err = 0;
1010         }
1011         return err;
1012 }
1013
/*
 * IPC_STAT / SHM_STAT: copy a segment's shmid64_ds into *tbuf.
 * SHM_STAT takes an index into the idr and returns the full ipc id;
 * IPC_STAT takes an ipc id (sequence-checked) and returns 0.
 * Runs entirely under RCU; the object is never spin-locked here.
 */
static int shmctl_stat(struct ipc_namespace *ns, int shmid,
			int cmd, struct shmid64_ds *tbuf)
{
	struct shmid_kernel *shp;
	int result;
	int err;

	rcu_read_lock();
	if (cmd == SHM_STAT) {
		/* shmid is an index; no sequence-number validation */
		shp = shm_obtain_object(ns, shmid);
		if (IS_ERR(shp)) {
			err = PTR_ERR(shp);
			goto out_unlock;
		}
		result = shp->shm_perm.id;
	} else {
		/* IPC_STAT: shmid is a full id, checked against seq */
		shp = shm_obtain_object_check(ns, shmid);
		if (IS_ERR(shp)) {
			err = PTR_ERR(shp);
			goto out_unlock;
		}
		result = 0;
	}

	/* read access is required to stat a segment */
	err = -EACCES;
	if (ipcperms(ns, &shp->shm_perm, S_IRUGO))
		goto out_unlock;

	err = security_shm_shmctl(shp, cmd);
	if (err)
		goto out_unlock;

	/* zero first so no kernel stack bytes leak through padding */
	memset(tbuf, 0, sizeof(*tbuf));
	kernel_to_ipc64_perm(&shp->shm_perm, &tbuf->shm_perm);
	tbuf->shm_segsz = shp->shm_segsz;
	tbuf->shm_atime = shp->shm_atim;
	tbuf->shm_dtime = shp->shm_dtim;
	tbuf->shm_ctime = shp->shm_ctim;
	tbuf->shm_cpid  = shp->shm_cprid;
	tbuf->shm_lpid  = shp->shm_lprid;
	tbuf->shm_nattch = shp->shm_nattch;
	rcu_read_unlock();
	return result;

out_unlock:
	rcu_read_unlock();
	return err;
}
1062
/*
 * SHM_LOCK / SHM_UNLOCK: pin a segment's pages in memory or release them.
 * Without CAP_IPC_LOCK, only the owner/creator may lock, and SHM_LOCK
 * additionally requires a non-zero RLIMIT_MEMLOCK.
 * Hugetlb segments are silently left alone (they are never swapped).
 */
static int shmctl_do_lock(struct ipc_namespace *ns, int shmid, int cmd)
{
	struct shmid_kernel *shp;
	struct file *shm_file;
	int err;

	rcu_read_lock();
	shp = shm_obtain_object_check(ns, shmid);
	if (IS_ERR(shp)) {
		err = PTR_ERR(shp);
		goto out_unlock1;
	}

	audit_ipc_obj(&(shp->shm_perm));
	err = security_shm_shmctl(shp, cmd);
	if (err)
		goto out_unlock1;

	ipc_lock_object(&shp->shm_perm);

	/* check if shm_destroy() is tearing down shp */
	if (!ipc_valid_object(&shp->shm_perm)) {
		err = -EIDRM;
		goto out_unlock0;
	}

	if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) {
		kuid_t euid = current_euid();

		/* unprivileged callers must own or have created the segment */
		if (!uid_eq(euid, shp->shm_perm.uid) &&
		    !uid_eq(euid, shp->shm_perm.cuid)) {
			err = -EPERM;
			goto out_unlock0;
		}
		if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK)) {
			err = -EPERM;
			goto out_unlock0;
		}
	}

	shm_file = shp->shm_file;
	/* hugetlb pages are unswappable; nothing to (un)lock */
	if (is_file_hugepages(shm_file))
		goto out_unlock0;

	if (cmd == SHM_LOCK) {
		struct user_struct *user = current_user();

		err = shmem_lock(shm_file, 1, user);
		/* record the locker only on the 0 -> locked transition */
		if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) {
			shp->shm_perm.mode |= SHM_LOCKED;
			shp->mlock_user = user;
		}
		goto out_unlock0;
	}

	/* SHM_UNLOCK */
	if (!(shp->shm_perm.mode & SHM_LOCKED))
		goto out_unlock0;
	shmem_lock(shm_file, 0, shp->mlock_user);
	shp->shm_perm.mode &= ~SHM_LOCKED;
	shp->mlock_user = NULL;
	/*
	 * Pin the file so we can walk its mapping after dropping the
	 * object lock and RCU; shmem_unlock_mapping() may sleep.
	 */
	get_file(shm_file);
	ipc_unlock_object(&shp->shm_perm);
	rcu_read_unlock();
	shmem_unlock_mapping(shm_file->f_mapping);

	fput(shm_file);
	return err;

out_unlock0:
	ipc_unlock_object(&shp->shm_perm);
out_unlock1:
	rcu_read_unlock();
	return err;
}
1138
1139 SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
1140 {
1141         int err, version;
1142         struct ipc_namespace *ns;
1143         struct shmid64_ds sem64;
1144
1145         if (cmd < 0 || shmid < 0)
1146                 return -EINVAL;
1147
1148         version = ipc_parse_version(&cmd);
1149         ns = current->nsproxy->ipc_ns;
1150
1151         switch (cmd) {
1152         case IPC_INFO: {
1153                 struct shminfo64 shminfo;
1154                 err = shmctl_ipc_info(ns, &shminfo);
1155                 if (err < 0)
1156                         return err;
1157                 if (copy_shminfo_to_user(buf, &shminfo, version))
1158                         err = -EFAULT;
1159                 return err;
1160         }
1161         case SHM_INFO: {
1162                 struct shm_info shm_info;
1163                 err = shmctl_shm_info(ns, &shm_info);
1164                 if (err < 0)
1165                         return err;
1166                 if (copy_to_user(buf, &shm_info, sizeof(shm_info)))
1167                         err = -EFAULT;
1168                 return err;
1169         }
1170         case SHM_STAT:
1171         case IPC_STAT: {
1172                 err = shmctl_stat(ns, shmid, cmd, &sem64);
1173                 if (err < 0)
1174                         return err;
1175                 if (copy_shmid_to_user(buf, &sem64, version))
1176                         err = -EFAULT;
1177                 return err;
1178         }
1179         case IPC_SET:
1180                 if (copy_shmid_from_user(&sem64, buf, version))
1181                         return -EFAULT;
1182                 /* fallthru */
1183         case IPC_RMID:
1184                 return shmctl_down(ns, shmid, cmd, &sem64);
1185         case SHM_LOCK:
1186         case SHM_UNLOCK:
1187                 return shmctl_do_lock(ns, shmid, cmd);
1188         default:
1189                 return -EINVAL;
1190         }
1191 }
1192
1193 #ifdef CONFIG_COMPAT
1194
/*
 * Old-ABI (non-IPC_64) shmid_ds layout for 32-bit compat callers.
 * Used by copy_compat_shmid_to_user()/copy_compat_shmid_from_user()
 * when the IPC version is not IPC_64; field layout is ABI, do not touch.
 */
struct compat_shmid_ds {
	struct compat_ipc_perm shm_perm;
	int shm_segsz;
	compat_time_t shm_atime;
	compat_time_t shm_dtime;
	compat_time_t shm_ctime;
	compat_ipc_pid_t shm_cpid;
	compat_ipc_pid_t shm_lpid;
	unsigned short shm_nattch;
	unsigned short shm_unused;	/* historical padding, never read */
	compat_uptr_t shm_unused2;
	compat_uptr_t shm_unused3;
};
1208
/*
 * IPC_64 shminfo layout for 32-bit compat callers; filled from
 * struct shminfo64 by copy_compat_shminfo_to_user(). ABI layout.
 */
struct compat_shminfo64 {
	compat_ulong_t shmmax;
	compat_ulong_t shmmin;
	compat_ulong_t shmmni;
	compat_ulong_t shmseg;
	compat_ulong_t shmall;
	compat_ulong_t __unused1;
	compat_ulong_t __unused2;
	compat_ulong_t __unused3;
	compat_ulong_t __unused4;
};
1220
/*
 * SHM_INFO result layout for 32-bit compat callers; filled from
 * struct shm_info by put_compat_shm_info(). ABI layout.
 */
struct compat_shm_info {
	compat_int_t used_ids;
	compat_ulong_t shm_tot, shm_rss, shm_swp;
	compat_ulong_t swap_attempts, swap_successes;
};
1226
/*
 * Copy shminfo out to a 32-bit compat caller in either the IPC_64 or
 * the old layout.  Returns non-zero on fault (caller maps to -EFAULT).
 * The memset() is essential: it zeroes padding so no kernel stack
 * bytes leak to user space.
 */
static int copy_compat_shminfo_to_user(void __user *buf, struct shminfo64 *in,
					int version)
{
	/* clamp so the value survives truncation to 32-bit fields */
	if (in->shmmax > INT_MAX)
		in->shmmax = INT_MAX;
	if (version == IPC_64) {
		struct compat_shminfo64 info;
		memset(&info, 0, sizeof(info));
		info.shmmax = in->shmmax;
		info.shmmin = in->shmmin;
		info.shmmni = in->shmmni;
		info.shmseg = in->shmseg;
		info.shmall = in->shmall;
		return copy_to_user(buf, &info, sizeof(info));
	} else {
		struct shminfo info;
		memset(&info, 0, sizeof(info));
		info.shmmax = in->shmmax;
		info.shmmin = in->shmmin;
		info.shmmni = in->shmmni;
		info.shmseg = in->shmseg;
		info.shmall = in->shmall;
		return copy_to_user(buf, &info, sizeof(info));
	}
}
1252
/*
 * Copy SHM_INFO results out to a 32-bit compat caller.  Returns
 * non-zero on fault (caller maps to -EFAULT).  memset() zeroes
 * padding to avoid leaking kernel stack contents.
 */
static int put_compat_shm_info(struct shm_info *ip,
				struct compat_shm_info __user *uip)
{
	struct compat_shm_info info;

	memset(&info, 0, sizeof(info));
	info.used_ids = ip->used_ids;
	info.shm_tot = ip->shm_tot;
	info.shm_rss = ip->shm_rss;
	info.shm_swp = ip->shm_swp;
	info.swap_attempts = ip->swap_attempts;
	info.swap_successes = ip->swap_successes;
	return copy_to_user(uip, &info, sizeof(info));
}
1267
/*
 * Copy a shmid64_ds out to a 32-bit compat caller in either the IPC_64
 * or the old layout.  Returns non-zero on fault (caller maps to
 * -EFAULT).  memset() zeroes padding to avoid a stack infoleak.
 */
static int copy_compat_shmid_to_user(void __user *buf, struct shmid64_ds *in,
					int version)
{
	if (version == IPC_64) {
		struct compat_shmid64_ds v;
		memset(&v, 0, sizeof(v));
		to_compat_ipc64_perm(&v.shm_perm, &in->shm_perm);
		v.shm_atime = in->shm_atime;
		v.shm_dtime = in->shm_dtime;
		v.shm_ctime = in->shm_ctime;
		v.shm_segsz = in->shm_segsz;
		v.shm_nattch = in->shm_nattch;
		v.shm_cpid = in->shm_cpid;
		v.shm_lpid = in->shm_lpid;
		return copy_to_user(buf, &v, sizeof(v));
	} else {
		struct compat_shmid_ds v;
		memset(&v, 0, sizeof(v));
		to_compat_ipc_perm(&v.shm_perm, &in->shm_perm);
		/* the old perm layout does not carry the key; copy it here */
		v.shm_perm.key = in->shm_perm.key;
		v.shm_atime = in->shm_atime;
		v.shm_dtime = in->shm_dtime;
		v.shm_ctime = in->shm_ctime;
		v.shm_segsz = in->shm_segsz;
		v.shm_nattch = in->shm_nattch;
		v.shm_cpid = in->shm_cpid;
		v.shm_lpid = in->shm_lpid;
		return copy_to_user(buf, &v, sizeof(v));
	}
}
1298
/*
 * Read an IPC_SET payload from a 32-bit compat caller; only the
 * permission fields are meaningful, the rest of *out stays zeroed.
 * Returns non-zero on fault (caller maps to -EFAULT).
 */
static int copy_compat_shmid_from_user(struct shmid64_ds *out, void __user *buf,
					int version)
{
	memset(out, 0, sizeof(*out));
	if (version == IPC_64) {
		struct compat_shmid64_ds *p = buf;
		return get_compat_ipc64_perm(&out->shm_perm, &p->shm_perm);
	} else {
		struct compat_shmid_ds *p = buf;
		return get_compat_ipc_perm(&out->shm_perm, &p->shm_perm);
	}
}
1311
1312 COMPAT_SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, void __user *, uptr)
1313 {
1314         struct ipc_namespace *ns;
1315         struct shmid64_ds sem64;
1316         int version = compat_ipc_parse_version(&cmd);
1317         int err;
1318
1319         ns = current->nsproxy->ipc_ns;
1320
1321         if (cmd < 0 || shmid < 0)
1322                 return -EINVAL;
1323
1324         switch (cmd) {
1325         case IPC_INFO: {
1326                 struct shminfo64 shminfo;
1327                 err = shmctl_ipc_info(ns, &shminfo);
1328                 if (err < 0)
1329                         return err;
1330                 if (copy_compat_shminfo_to_user(uptr, &shminfo, version))
1331                         err = -EFAULT;
1332                 return err;
1333         }
1334         case SHM_INFO: {
1335                 struct shm_info shm_info;
1336                 err = shmctl_shm_info(ns, &shm_info);
1337                 if (err < 0)
1338                         return err;
1339                 if (put_compat_shm_info(&shm_info, uptr))
1340                         err = -EFAULT;
1341                 return err;
1342         }
1343         case IPC_STAT:
1344         case SHM_STAT:
1345                 err = shmctl_stat(ns, shmid, cmd, &sem64);
1346                 if (err < 0)
1347                         return err;
1348                 if (copy_compat_shmid_to_user(uptr, &sem64, version))
1349                         err = -EFAULT;
1350                 return err;
1351
1352         case IPC_SET:
1353                 if (copy_compat_shmid_from_user(&sem64, uptr, version))
1354                         return -EFAULT;
1355                 /* fallthru */
1356         case IPC_RMID:
1357                 return shmctl_down(ns, shmid, cmd, &sem64);
1358         case SHM_LOCK:
1359         case SHM_UNLOCK:
1360                 return shmctl_do_lock(ns, shmid, cmd);
1361                 break;
1362         default:
1363                 return -EINVAL;
1364         }
1365         return err;
1366 }
1367 #endif
1368
1369 /*
1370  * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
1371  *
1372  * NOTE! Despite the name, this is NOT a direct system call entrypoint. The
1373  * "raddr" thing points to kernel space, and there has to be a wrapper around
1374  * this.
1375  */
1376 long do_shmat(int shmid, char __user *shmaddr, int shmflg,
1377               ulong *raddr, unsigned long shmlba)
1378 {
1379         struct shmid_kernel *shp;
1380         unsigned long addr = (unsigned long)shmaddr;
1381         unsigned long size;
1382         struct file *file;
1383         int    err;
1384         unsigned long flags = MAP_SHARED;
1385         unsigned long prot;
1386         int acc_mode;
1387         struct ipc_namespace *ns;
1388         struct shm_file_data *sfd;
1389         struct path path;
1390         fmode_t f_mode;
1391         unsigned long populate = 0;
1392
1393         err = -EINVAL;
1394         if (shmid < 0)
1395                 goto out;
1396
1397         if (addr) {
1398                 if (addr & (shmlba - 1)) {
1399                         if (shmflg & SHM_RND) {
1400                                 addr &= ~(shmlba - 1);  /* round down */
1401
1402                                 /*
1403                                  * Ensure that the round-down is non-nil
1404                                  * when remapping. This can happen for
1405                                  * cases when addr < shmlba.
1406                                  */
1407                                 if (!addr && (shmflg & SHM_REMAP))
1408                                         goto out;
1409                         } else
1410 #ifndef __ARCH_FORCE_SHMLBA
1411                                 if (addr & ~PAGE_MASK)
1412 #endif
1413                                         goto out;
1414                 }
1415
1416                 flags |= MAP_FIXED;
1417         } else if ((shmflg & SHM_REMAP))
1418                 goto out;
1419
1420         if (shmflg & SHM_RDONLY) {
1421                 prot = PROT_READ;
1422                 acc_mode = S_IRUGO;
1423                 f_mode = FMODE_READ;
1424         } else {
1425                 prot = PROT_READ | PROT_WRITE;
1426                 acc_mode = S_IRUGO | S_IWUGO;
1427                 f_mode = FMODE_READ | FMODE_WRITE;
1428         }
1429         if (shmflg & SHM_EXEC) {
1430                 prot |= PROT_EXEC;
1431                 acc_mode |= S_IXUGO;
1432         }
1433
1434         /*
1435          * We cannot rely on the fs check since SYSV IPC does have an
1436          * additional creator id...
1437          */
1438         ns = current->nsproxy->ipc_ns;
1439         rcu_read_lock();
1440         shp = shm_obtain_object_check(ns, shmid);
1441         if (IS_ERR(shp)) {
1442                 err = PTR_ERR(shp);
1443                 goto out_unlock;
1444         }
1445
1446         err = -EACCES;
1447         if (ipcperms(ns, &shp->shm_perm, acc_mode))
1448                 goto out_unlock;
1449
1450         err = security_shm_shmat(shp, shmaddr, shmflg);
1451         if (err)
1452                 goto out_unlock;
1453
1454         ipc_lock_object(&shp->shm_perm);
1455
1456         /* check if shm_destroy() is tearing down shp */
1457         if (!ipc_valid_object(&shp->shm_perm)) {
1458                 ipc_unlock_object(&shp->shm_perm);
1459                 err = -EIDRM;
1460                 goto out_unlock;
1461         }
1462
1463         path = shp->shm_file->f_path;
1464         path_get(&path);
1465         shp->shm_nattch++;
1466         size = i_size_read(d_inode(path.dentry));
1467         ipc_unlock_object(&shp->shm_perm);
1468         rcu_read_unlock();
1469
1470         err = -ENOMEM;
1471         sfd = kzalloc(sizeof(*sfd), GFP_KERNEL);
1472         if (!sfd) {
1473                 path_put(&path);
1474                 goto out_nattch;
1475         }
1476
1477         file = alloc_file(&path, f_mode,
1478                           is_file_hugepages(shp->shm_file) ?
1479                                 &shm_file_operations_huge :
1480                                 &shm_file_operations);
1481         err = PTR_ERR(file);
1482         if (IS_ERR(file)) {
1483                 kfree(sfd);
1484                 path_put(&path);
1485                 goto out_nattch;
1486         }
1487
1488         file->private_data = sfd;
1489         file->f_mapping = shp->shm_file->f_mapping;
1490         sfd->id = shp->shm_perm.id;
1491         sfd->ns = get_ipc_ns(ns);
1492         /*
1493          * We need to take a reference to the real shm file to prevent the
1494          * pointer from becoming stale in cases where the lifetime of the outer
1495          * file extends beyond that of the shm segment.  It's not usually
1496          * possible, but it can happen during remap_file_pages() emulation as
1497          * that unmaps the memory, then does ->mmap() via file reference only.
1498          * We'll deny the ->mmap() if the shm segment was since removed, but to
1499          * detect shm ID reuse we need to compare the file pointers.
1500          */
1501         sfd->file = get_file(shp->shm_file);
1502         sfd->vm_ops = NULL;
1503
1504         err = security_mmap_file(file, prot, flags);
1505         if (err)
1506                 goto out_fput;
1507
1508         if (down_write_killable(&current->mm->mmap_sem)) {
1509                 err = -EINTR;
1510                 goto out_fput;
1511         }
1512
1513         if (addr && !(shmflg & SHM_REMAP)) {
1514                 err = -EINVAL;
1515                 if (addr + size < addr)
1516                         goto invalid;
1517
1518                 if (find_vma_intersection(current->mm, addr, addr + size))
1519                         goto invalid;
1520         }
1521
1522         addr = do_mmap_pgoff(file, addr, size, prot, flags, 0, &populate, NULL);
1523         *raddr = addr;
1524         err = 0;
1525         if (IS_ERR_VALUE(addr))
1526                 err = (long)addr;
1527 invalid:
1528         up_write(&current->mm->mmap_sem);
1529         if (populate)
1530                 mm_populate(addr, populate);
1531
1532 out_fput:
1533         fput(file);
1534
1535 out_nattch:
1536         down_write(&shm_ids(ns).rwsem);
1537         shp = shm_lock(ns, shmid);
1538         shp->shm_nattch--;
1539
1540         if (shm_may_destroy(shp))
1541                 shm_destroy(ns, shp);
1542         else
1543                 shm_unlock(shp);
1544         up_write(&shm_ids(ns).rwsem);
1545         return err;
1546
1547 out_unlock:
1548         rcu_read_unlock();
1549 out:
1550         return err;
1551 }
1552
1553 SYSCALL_DEFINE3(shmat, int, shmid, char __user *, shmaddr, int, shmflg)
1554 {
1555         unsigned long ret;
1556         long err;
1557
1558         err = do_shmat(shmid, shmaddr, shmflg, &ret, SHMLBA);
1559         if (err)
1560                 return err;
1561         force_successful_syscall_return();
1562         return (long)ret;
1563 }
1564
1565 #ifdef CONFIG_COMPAT
1566
1567 #ifndef COMPAT_SHMLBA
1568 #define COMPAT_SHMLBA   SHMLBA
1569 #endif
1570
1571 COMPAT_SYSCALL_DEFINE3(shmat, int, shmid, compat_uptr_t, shmaddr, int, shmflg)
1572 {
1573         unsigned long ret;
1574         long err;
1575
1576         err = do_shmat(shmid, compat_ptr(shmaddr), shmflg, &ret, COMPAT_SHMLBA);
1577         if (err)
1578                 return err;
1579         force_successful_syscall_return();
1580         return (long)ret;
1581 }
1582 #endif
1583
1584 /*
1585  * detach and kill segment if marked destroyed.
1586  * The work is done in shm_close.
1587  */
1588 SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
1589 {
1590         struct mm_struct *mm = current->mm;
1591         struct vm_area_struct *vma;
1592         unsigned long addr = (unsigned long)shmaddr;
1593         int retval = -EINVAL;
1594 #ifdef CONFIG_MMU
1595         loff_t size = 0;
1596         struct file *file;
1597         struct vm_area_struct *next;
1598 #endif
1599
1600         if (addr & ~PAGE_MASK)
1601                 return retval;
1602
1603         if (down_write_killable(&mm->mmap_sem))
1604                 return -EINTR;
1605
1606         /*
1607          * This function tries to be smart and unmap shm segments that
1608          * were modified by partial mlock or munmap calls:
1609          * - It first determines the size of the shm segment that should be
1610          *   unmapped: It searches for a vma that is backed by shm and that
1611          *   started at address shmaddr. It records it's size and then unmaps
1612          *   it.
1613          * - Then it unmaps all shm vmas that started at shmaddr and that
1614          *   are within the initially determined size and that are from the
1615          *   same shm segment from which we determined the size.
1616          * Errors from do_munmap are ignored: the function only fails if
1617          * it's called with invalid parameters or if it's called to unmap
1618          * a part of a vma. Both calls in this function are for full vmas,
1619          * the parameters are directly copied from the vma itself and always
1620          * valid - therefore do_munmap cannot fail. (famous last words?)
1621          */
1622         /*
1623          * If it had been mremap()'d, the starting address would not
1624          * match the usual checks anyway. So assume all vma's are
1625          * above the starting address given.
1626          */
1627         vma = find_vma(mm, addr);
1628
1629 #ifdef CONFIG_MMU
1630         while (vma) {
1631                 next = vma->vm_next;
1632
1633                 /*
1634                  * Check if the starting address would match, i.e. it's
1635                  * a fragment created by mprotect() and/or munmap(), or it
1636                  * otherwise it starts at this address with no hassles.
1637                  */
1638                 if ((vma->vm_ops == &shm_vm_ops) &&
1639                         (vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {
1640
1641                         /*
1642                          * Record the file of the shm segment being
1643                          * unmapped.  With mremap(), someone could place
1644                          * page from another segment but with equal offsets
1645                          * in the range we are unmapping.
1646                          */
1647                         file = vma->vm_file;
1648                         size = i_size_read(file_inode(vma->vm_file));
1649                         do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL);
1650                         /*
1651                          * We discovered the size of the shm segment, so
1652                          * break out of here and fall through to the next
1653                          * loop that uses the size information to stop
1654                          * searching for matching vma's.
1655                          */
1656                         retval = 0;
1657                         vma = next;
1658                         break;
1659                 }
1660                 vma = next;
1661         }
1662
1663         /*
1664          * We need look no further than the maximum address a fragment
1665          * could possibly have landed at. Also cast things to loff_t to
1666          * prevent overflows and make comparisons vs. equal-width types.
1667          */
1668         size = PAGE_ALIGN(size);
1669         while (vma && (loff_t)(vma->vm_end - addr) <= size) {
1670                 next = vma->vm_next;
1671
1672                 /* finding a matching vma now does not alter retval */
1673                 if ((vma->vm_ops == &shm_vm_ops) &&
1674                     ((vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) &&
1675                     (vma->vm_file == file))
1676                         do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL);
1677                 vma = next;
1678         }
1679
1680 #else   /* CONFIG_MMU */
1681         /* under NOMMU conditions, the exact address to be destroyed must be
1682          * given
1683          */
1684         if (vma && vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) {
1685                 do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL);
1686                 retval = 0;
1687         }
1688
1689 #endif
1690
1691         up_write(&mm->mmap_sem);
1692         return retval;
1693 }
1694
#ifdef CONFIG_PROC_FS
/*
 * Emit one /proc/sysvipc/shm row for the segment behind *it.
 * Field order must match the seq_printf format below; ids are shown
 * munged into the reader's user namespace.
 */
static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
{
	struct user_namespace *user_ns = seq_user_ns(s);
	struct kern_ipc_perm *ipcp = it;
	struct shmid_kernel *shp;
	unsigned long rss = 0, swp = 0;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
	shm_add_rss_swap(shp, &rss, &swp);

/* column width for size fields depends on the native word size */
#if BITS_PER_LONG <= 32
#define SIZE_SPEC "%10lu"
#else
#define SIZE_SPEC "%21lu"
#endif

	seq_printf(s,
		   "%10d %10d  %4o " SIZE_SPEC " %5u %5u  "
		   "%5lu %5u %5u %5u %5u %10llu %10llu %10llu "
		   SIZE_SPEC " " SIZE_SPEC "\n",
		   shp->shm_perm.key,
		   shp->shm_perm.id,
		   shp->shm_perm.mode,
		   shp->shm_segsz,
		   shp->shm_cprid,
		   shp->shm_lprid,
		   shp->shm_nattch,
		   from_kuid_munged(user_ns, shp->shm_perm.uid),
		   from_kgid_munged(user_ns, shp->shm_perm.gid),
		   from_kuid_munged(user_ns, shp->shm_perm.cuid),
		   from_kgid_munged(user_ns, shp->shm_perm.cgid),
		   shp->shm_atim,
		   shp->shm_dtim,
		   shp->shm_ctim,
		   rss * PAGE_SIZE,
		   swp * PAGE_SIZE);

	return 0;
}
#endif
1735 #endif