/* mm/ashmem.c
 *
 * Anonymous Shared Memory Subsystem, ashmem
 *
 * Copyright (C) 2008 Google, Inc.
 *
 * Robert Love <rlove@google.com>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) "ashmem: " fmt

#include <linux/init.h>
#include <linux/export.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/falloc.h>
#include <linux/miscdevice.h>
#include <linux/security.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <linux/personality.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/shmem_fs.h>

#include "ashmem.h"

#define ASHMEM_NAME_PREFIX "dev/ashmem/"
#define ASHMEM_NAME_PREFIX_LEN (sizeof(ASHMEM_NAME_PREFIX) - 1)
#define ASHMEM_FULL_NAME_LEN (ASHMEM_NAME_LEN + ASHMEM_NAME_PREFIX_LEN)

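/*
 * Illustrative sketch, not part of the driver: with the prefix above, an
 * area given a (hypothetical) name via ASHMEM_SET_NAME appears in
 * /proc/<pid>/maps under the full prefixed name once it is mapped:
 *
 *	ioctl(fd, ASHMEM_SET_NAME, "my-heap");
 *	// /proc/<pid>/maps: ... /dev/ashmem/my-heap
 */
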
/**
 * struct ashmem_area - The anonymous shared memory area
 * @name:		The optional name in /proc/pid/maps
 * @unpinned_list:	The list of this area's unpinned page ranges
 * @file:		The shmem-based backing file
 * @size:		The size of the mapping, in bytes
 * @prot_mask:		The allowed protection bits, as vm_flags
 *
 * The lifecycle of this structure is from our parent file's open() until
 * its release(). It is also protected by 'ashmem_mutex'.
 *
 * Warning: Mappings do NOT pin this structure; it dies on close().
 */
struct ashmem_area {
	char name[ASHMEM_FULL_NAME_LEN];
	struct list_head unpinned_list;
	struct file *file;
	size_t size;
	unsigned long prot_mask;
};

/**
 * struct ashmem_range - A range of unpinned/evictable pages
 * @lru:		The entry in the LRU list
 * @unpinned:		The entry in its area's unpinned list
 * @asma:		The associated anonymous shared memory area
 * @pgstart:		The starting page (inclusive)
 * @pgend:		The ending page (inclusive)
 * @purged:		The purge status (ASHMEM_NOT_PURGED or ASHMEM_WAS_PURGED)
 *
 * The lifecycle of this structure is from unpin to pin.
 * It is protected by 'ashmem_mutex'.
 */
struct ashmem_range {
	struct list_head lru;
	struct list_head unpinned;
	struct ashmem_area *asma;
	size_t pgstart;
	size_t pgend;
	unsigned int purged;
};

/* LRU list of unpinned pages, protected by ashmem_mutex */
static LIST_HEAD(ashmem_lru_list);

/*
 * unsigned long lru_count - The count of pages on our LRU list.
 *
 * This is protected by ashmem_mutex.
 */
static unsigned long lru_count;

/*
 * ashmem_mutex - protects the list of, and each individual, ashmem_area
 *
 * Lock Ordering: ashmem_mutex -> i_mutex -> i_alloc_sem
 */
static DEFINE_MUTEX(ashmem_mutex);

static struct kmem_cache *ashmem_area_cachep __read_mostly;
static struct kmem_cache *ashmem_range_cachep __read_mostly;

#define range_size(range) \
	((range)->pgend - (range)->pgstart + 1)

#define range_on_lru(range) \
	((range)->purged == ASHMEM_NOT_PURGED)

#define page_range_subsumes_range(range, start, end) \
	(((range)->pgstart >= (start)) && ((range)->pgend <= (end)))

#define page_range_subsumed_by_range(range, start, end) \
	(((range)->pgstart <= (start)) && ((range)->pgend >= (end)))

#define page_in_range(range, page) \
	(((range)->pgstart <= (page)) && ((range)->pgend >= (page)))

#define page_range_in_range(range, start, end) \
	(page_in_range(range, start) || page_in_range(range, end) || \
	 page_range_subsumes_range(range, start, end))

#define range_before_page(range, page) \
	((range)->pgend < (page))

#define PROT_MASK	(PROT_EXEC | PROT_READ | PROT_WRITE)

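/*
 * A small worked example of the interval macros above (page numbers are
 * made up): for a range covering pages [4, 9],
 *
 *	range_size(range)                          == 6
 *	page_range_subsumes_range(range, 2, 10)    -> true  (4..9 inside 2..10)
 *	page_range_subsumed_by_range(range, 5, 7)  -> true  (5..7 inside 4..9)
 *	page_in_range(range, 9)                    -> true  (pgend is inclusive)
 *	range_before_page(range, 10)               -> true  (whole range below 10)
 */
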
/**
 * lru_add() - Adds a range of memory to the LRU list
 * @range:	The memory range being added
 *
 * The range is first added to the end (tail) of the LRU list.
 * After this, the size of the range is added to @lru_count.
 */
static inline void lru_add(struct ashmem_range *range)
{
	list_add_tail(&range->lru, &ashmem_lru_list);
	lru_count += range_size(range);
}

/**
 * lru_del() - Removes a range of memory from the LRU list
 * @range:	The memory range being removed
 *
 * The range is first deleted from the LRU list.
 * After this, the size of the range is removed from @lru_count.
 */
static inline void lru_del(struct ashmem_range *range)
{
	list_del(&range->lru);
	lru_count -= range_size(range);
}

/**
 * range_alloc() - Allocates and initializes a new ashmem_range structure
 * @asma:	The associated ashmem_area
 * @prev_range:	The previous ashmem_range in the sorted asma->unpinned list
 * @purged:	Initial purge status (ASHMEM_NOT_PURGED or ASHMEM_WAS_PURGED)
 * @start:	The starting page (inclusive)
 * @end:	The ending page (inclusive)
 *
 * This function is protected by ashmem_mutex.
 *
 * Return: 0 if successful, or -ENOMEM if there is an error
 */
static int range_alloc(struct ashmem_area *asma,
		       struct ashmem_range *prev_range, unsigned int purged,
		       size_t start, size_t end)
{
	struct ashmem_range *range;

	range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL);
	if (unlikely(!range))
		return -ENOMEM;

	range->asma = asma;
	range->pgstart = start;
	range->pgend = end;
	range->purged = purged;

	list_add_tail(&range->unpinned, &prev_range->unpinned);

	if (range_on_lru(range))
		lru_add(range);

	return 0;
}

/**
 * range_del() - Deletes and deallocates an ashmem_range structure
 * @range:	The associated ashmem_range that has previously been allocated
 */
static void range_del(struct ashmem_range *range)
{
	list_del(&range->unpinned);
	if (range_on_lru(range))
		lru_del(range);
	kmem_cache_free(ashmem_range_cachep, range);
}

/**
 * range_shrink() - Shrinks an ashmem_range
 * @range:	The associated ashmem_range being shrunk
 * @start:	The starting page (inclusive) of the new range
 * @end:	The ending page (inclusive) of the new range
 *
 * This does not modify the data inside the existing range in any way - it
 * simply shrinks the boundaries of the range.
 *
 * Theoretically, with a little tweaking, this could eventually be changed
 * to range_resize, and expand the lru_count if the new range is larger.
 */
static inline void range_shrink(struct ashmem_range *range,
				size_t start, size_t end)
{
	size_t pre = range_size(range);

	range->pgstart = start;
	range->pgend = end;

	if (range_on_lru(range))
		lru_count -= pre - range_size(range);
}

/**
 * ashmem_open() - Opens an Anonymous Shared Memory structure
 * @inode:	The inode of the backing file
 * @file:	The backing file
 *
 * Please note that the ashmem_area is not returned by this function - it is
 * instead written to "file->private_data".
 *
 * Return: 0 if successful, or another code if unsuccessful.
 */
static int ashmem_open(struct inode *inode, struct file *file)
{
	struct ashmem_area *asma;
	int ret;

	ret = generic_file_open(inode, file);
	if (unlikely(ret))
		return ret;

	asma = kmem_cache_zalloc(ashmem_area_cachep, GFP_KERNEL);
	if (unlikely(!asma))
		return -ENOMEM;

	INIT_LIST_HEAD(&asma->unpinned_list);
	memcpy(asma->name, ASHMEM_NAME_PREFIX, ASHMEM_NAME_PREFIX_LEN);
	asma->prot_mask = PROT_MASK;
	file->private_data = asma;

	return 0;
}

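/*
 * For orientation, a hypothetical userspace lifecycle (a sketch, not part
 * of this driver; error handling omitted, and the name and size are made
 * up). Note that the size must be set before mmap(), and the name can only
 * be set before the backing file exists:
 *
 *	int fd = open("/dev/ashmem", O_RDWR);
 *	ioctl(fd, ASHMEM_SET_NAME, "example-region");	// optional
 *	ioctl(fd, ASHMEM_SET_SIZE, 4096);		// required before mmap
 *	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0);
 *	// ... use the memory; the area dies when the last reference drops.
 */
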
/**
 * ashmem_release() - Releases an Anonymous Shared Memory structure
 * @ignored:	The inode of the backing file - it is ignored here
 * @file:	The backing file
 *
 * Return: 0 if successful. If it is anything else, go have a coffee and
 * try again.
 */
static int ashmem_release(struct inode *ignored, struct file *file)
{
	struct ashmem_area *asma = file->private_data;
	struct ashmem_range *range, *next;

	mutex_lock(&ashmem_mutex);
	list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned)
		range_del(range);
	mutex_unlock(&ashmem_mutex);

	if (asma->file)
		fput(asma->file);
	kmem_cache_free(ashmem_area_cachep, asma);

	return 0;
}

/**
 * ashmem_read() - Reads a set of bytes from an Ashmem-enabled file
 * @file:	The associated backing file
 * @buf:	The userspace buffer into which data is read
 * @len:	The number of bytes being read
 * @pos:	The position of the first byte to read
 *
 * Return: The number of bytes read, or a negative errno on failure.
 */
static ssize_t ashmem_read(struct file *file, char __user *buf,
			   size_t len, loff_t *pos)
{
	struct ashmem_area *asma = file->private_data;
	int ret = 0;

	mutex_lock(&ashmem_mutex);

	/* If size is not set, or set to 0, always return EOF. */
	if (asma->size == 0)
		goto out_unlock;

	if (!asma->file) {
		ret = -EBADF;
		goto out_unlock;
	}

	mutex_unlock(&ashmem_mutex);

	/*
	 * asma and asma->file are used outside the lock here. We assume
	 * once asma->file is set it will never be changed, and will not
	 * be destroyed until all references to the file are dropped and
	 * ashmem_release is called.
	 */
	ret = __vfs_read(asma->file, buf, len, pos);
	if (ret >= 0)
		/* Update backing file pos, since f_ops->read() doesn't */
		asma->file->f_pos = *pos;
	return ret;

out_unlock:
	mutex_unlock(&ashmem_mutex);
	return ret;
}

static loff_t ashmem_llseek(struct file *file, loff_t offset, int origin)
{
	struct ashmem_area *asma = file->private_data;
	loff_t ret;

	mutex_lock(&ashmem_mutex);

	if (asma->size == 0) {
		mutex_unlock(&ashmem_mutex);
		return -EINVAL;
	}

	if (!asma->file) {
		mutex_unlock(&ashmem_mutex);
		return -EBADF;
	}

	mutex_unlock(&ashmem_mutex);

	ret = vfs_llseek(asma->file, offset, origin);
	if (ret < 0)
		return ret;

	/* Copy f_pos from backing file, since f_ops->llseek() sets it */
	file->f_pos = asma->file->f_pos;
	return ret;
}

static inline vm_flags_t calc_vm_may_flags(unsigned long prot)
{
	return _calc_vm_trans(prot, PROT_READ,  VM_MAYREAD) |
	       _calc_vm_trans(prot, PROT_WRITE, VM_MAYWRITE) |
	       _calc_vm_trans(prot, PROT_EXEC,  VM_MAYEXEC);
}

static int ashmem_vmfile_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* do not allow to mmap ashmem backing shmem file directly */
	return -EPERM;
}

static unsigned long
ashmem_vmfile_get_unmapped_area(struct file *file, unsigned long addr,
				unsigned long len, unsigned long pgoff,
				unsigned long flags)
{
	return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
}

static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	static struct file_operations vmfile_fops;
	struct ashmem_area *asma = file->private_data;
	int ret = 0;

	mutex_lock(&ashmem_mutex);

	/* user needs to SET_SIZE before mapping */
	if (unlikely(!asma->size)) {
		ret = -EINVAL;
		goto out;
	}

	/* requested mapping size larger than object size */
	if (vma->vm_end - vma->vm_start > PAGE_ALIGN(asma->size)) {
		ret = -EINVAL;
		goto out;
	}

	/* requested protection bits must match our allowed protection mask */
	if (unlikely((vma->vm_flags & ~calc_vm_prot_bits(asma->prot_mask)) &
		     calc_vm_prot_bits(PROT_MASK))) {
		ret = -EPERM;
		goto out;
	}
	vma->vm_flags &= ~calc_vm_may_flags(~asma->prot_mask);

	if (!asma->file) {
		char *name = ASHMEM_NAME_DEF;
		struct file *vmfile;

		if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0')
			name = asma->name;

		/* ... and allocate the backing shmem file */
		vmfile = shmem_file_setup(name, asma->size, vma->vm_flags);
		if (IS_ERR(vmfile)) {
			ret = PTR_ERR(vmfile);
			goto out;
		}
		vmfile->f_mode |= FMODE_LSEEK;
		asma->file = vmfile;
		/*
		 * override mmap operation of the vmfile so that it can't be
		 * remapped which would lead to creation of a new vma with no
		 * asma permission checks. Have to override get_unmapped_area
		 * as well to prevent VM_BUG_ON check for f_ops modification.
		 */
		if (!vmfile_fops.mmap) {
			vmfile_fops = *vmfile->f_op;
			vmfile_fops.mmap = ashmem_vmfile_mmap;
			vmfile_fops.get_unmapped_area =
					ashmem_vmfile_get_unmapped_area;
		}
		vmfile->f_op = &vmfile_fops;
	}
	get_file(asma->file);

	/*
	 * XXX - Reworked to use shmem_zero_setup() instead of
	 * shmem_set_file while we're in staging. -jstultz
	 */
	if (vma->vm_flags & VM_SHARED) {
		ret = shmem_zero_setup(vma);
		if (ret) {
			fput(asma->file);
			goto out;
		}
	}

	if (vma->vm_file)
		fput(vma->vm_file);
	vma->vm_file = asma->file;

out:
	mutex_unlock(&ashmem_mutex);
	return ret;
}

/*
 * ashmem_shrink - our cache shrinker, called from mm/vmscan.c
 *
 * 'nr_to_scan' is the number of objects to scan for freeing.
 *
 * 'gfp_mask' is the mask of the allocation that got us into this mess.
 *
 * Return value is the number of objects freed or -1 if we cannot
 * proceed without risk of deadlock (due to gfp_mask).
 *
 * We approximate LRU via least-recently-unpinned, jettisoning unpinned partial
 * chunks of ashmem regions LRU-wise one-at-a-time until we hit 'nr_to_scan'
 * pages freed.
 */
static unsigned long
ashmem_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	struct ashmem_range *range, *next;
	unsigned long freed = 0;

	/* We might recurse into filesystem code, so bail out if necessary */
	if (!(sc->gfp_mask & __GFP_FS))
		return SHRINK_STOP;

	if (!mutex_trylock(&ashmem_mutex))
		return -1;

	list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) {
		loff_t start = range->pgstart * PAGE_SIZE;
		loff_t end = (range->pgend + 1) * PAGE_SIZE;

		vfs_fallocate(range->asma->file,
			      FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
			      start, end - start);
		range->purged = ASHMEM_WAS_PURGED;
		lru_del(range);

		freed += range_size(range);
		if (--sc->nr_to_scan <= 0)
			break;
	}
	mutex_unlock(&ashmem_mutex);
	return freed;
}

static unsigned long
ashmem_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	/*
	 * note that lru_count is a count of pages on the lru, not a count of
	 * objects on the list. This means the scan function needs to return
	 * the number of pages freed, not the number of objects scanned.
	 */
	return lru_count;
}

static struct shrinker ashmem_shrinker = {
	.count_objects = ashmem_shrink_count,
	.scan_objects = ashmem_shrink_scan,
	/*
	 * XXX (dchinner): I wish people would comment on why they need
	 * significant changes to the default value here
	 */
	.seeks = DEFAULT_SEEKS * 4,
};

static int set_prot_mask(struct ashmem_area *asma, unsigned long prot)
{
	int ret = 0;

	mutex_lock(&ashmem_mutex);

	/* the user can only remove, not add, protection bits */
	if (unlikely((asma->prot_mask & prot) != prot)) {
		ret = -EINVAL;
		goto out;
	}

	/* does the application expect PROT_READ to imply PROT_EXEC? */
	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
		prot |= PROT_EXEC;

	asma->prot_mask = prot;

out:
	mutex_unlock(&ashmem_mutex);
	return ret;
}

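/*
 * Example of the one-way mask (a sketch with assumed values): a process
 * sharing a read-only view can drop PROT_WRITE after filling the region;
 * ashmem_mmap() then rejects any future writable mapping of this fd.
 *
 *	ioctl(fd, ASHMEM_SET_PROT_MASK, PROT_READ);	// was READ|WRITE|EXEC
 *	mmap(NULL, size, PROT_WRITE, MAP_SHARED, fd, 0);	// fails, -EPERM
 */
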
static int set_name(struct ashmem_area *asma, void __user *name)
{
	int len;
	int ret = 0;
	char local_name[ASHMEM_NAME_LEN];

	/*
	 * Holding the ashmem_mutex while doing a copy_from_user might cause
	 * a data abort which would try to access mmap_sem. If another
	 * thread has invoked ashmem_mmap then it will be holding the
	 * semaphore and will be waiting for ashmem_mutex, thereby leading to
	 * deadlock. We'll release the mutex and take the name to a local
	 * variable that does not need protection and later copy the local
	 * variable to the structure member with the lock held.
	 */
	len = strncpy_from_user(local_name, name, ASHMEM_NAME_LEN);
	if (len < 0)
		return len;
	if (len == ASHMEM_NAME_LEN)
		local_name[ASHMEM_NAME_LEN - 1] = '\0';
	mutex_lock(&ashmem_mutex);
	/* cannot change an existing mapping's name */
	if (unlikely(asma->file))
		ret = -EINVAL;
	else
		strcpy(asma->name + ASHMEM_NAME_PREFIX_LEN, local_name);

	mutex_unlock(&ashmem_mutex);
	return ret;
}

static int get_name(struct ashmem_area *asma, void __user *name)
{
	int ret = 0;
	size_t len;
	/*
	 * Have a local variable to which we'll copy the content
	 * from asma with the lock held. Later we can copy this to the user
	 * space safely without holding any locks. So even if we proceed to
	 * wait for mmap_sem, it won't lead to deadlock.
	 */
	char local_name[ASHMEM_NAME_LEN];

	mutex_lock(&ashmem_mutex);
	if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0') {
		/*
		 * Copying only `len', instead of ASHMEM_NAME_LEN, bytes
		 * prevents us from revealing one user's stack to another.
		 */
		len = strlen(asma->name + ASHMEM_NAME_PREFIX_LEN) + 1;
		memcpy(local_name, asma->name + ASHMEM_NAME_PREFIX_LEN, len);
	} else {
		len = sizeof(ASHMEM_NAME_DEF);
		memcpy(local_name, ASHMEM_NAME_DEF, len);
	}
	mutex_unlock(&ashmem_mutex);

	/*
	 * Now we are just copying from the stack variable to userland
	 * memory.
	 */
	if (unlikely(copy_to_user(name, local_name, len)))
		ret = -EFAULT;

	return ret;
}

/*
 * ashmem_pin - pin the given ashmem region, returning whether it was
 * previously purged (ASHMEM_WAS_PURGED) or not (ASHMEM_NOT_PURGED).
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
{
	struct ashmem_range *range, *next;
	int ret = ASHMEM_NOT_PURGED;

	list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {
		/* moved past last applicable page; we can short circuit */
		if (range_before_page(range, pgstart))
			break;

		/*
		 * The user can ask us to pin pages that span multiple ranges,
		 * or to pin pages that aren't even unpinned, so this is messy.
		 *
		 * Four cases:
		 * 1. The requested range subsumes an existing range, so we
		 *    just remove the entire matching range.
		 * 2. The requested range overlaps the start of an existing
		 *    range, so we just update that range.
		 * 3. The requested range overlaps the end of an existing
		 *    range, so we just update that range.
		 * 4. The requested range punches a hole in an existing range,
		 *    so we have to update one side of the range and then
		 *    create a new range for the other side.
		 */
		if (page_range_in_range(range, pgstart, pgend)) {
			ret |= range->purged;

			/* Case #1: Easy. Just nuke the whole thing. */
			if (page_range_subsumes_range(range, pgstart, pgend)) {
				range_del(range);
				continue;
			}

			/* Case #2: We overlap from the start, so adjust it */
			if (range->pgstart >= pgstart) {
				range_shrink(range, pgend + 1, range->pgend);
				continue;
			}

			/* Case #3: We overlap from the rear, so adjust it */
			if (range->pgend <= pgend) {
				range_shrink(range, range->pgstart,
					     pgstart - 1);
				continue;
			}

			/*
			 * Case #4: We eat a chunk out of the middle. A bit
			 * more complicated, we allocate a new range for the
			 * second half and adjust the first chunk's endpoint.
			 */
			range_alloc(asma, range, range->purged,
				    pgend + 1, range->pgend);
			range_shrink(range, range->pgstart, pgstart - 1);
			break;
		}
	}

	return ret;
}

/*
 * ashmem_unpin - unpin the given range of pages. Returns zero on success.
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_unpin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
{
	struct ashmem_range *range, *next;
	unsigned int purged = ASHMEM_NOT_PURGED;

restart:
	list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {
		/* short circuit: this is our insertion point */
		if (range_before_page(range, pgstart))
			break;

		/*
		 * The user can ask us to unpin pages that are already entirely
		 * or partially pinned. We handle those two cases here.
		 */
		if (page_range_subsumed_by_range(range, pgstart, pgend))
			return 0;
		if (page_range_in_range(range, pgstart, pgend)) {
			pgstart = min_t(size_t, range->pgstart, pgstart);
			pgend = max_t(size_t, range->pgend, pgend);
			purged |= range->purged;
			range_del(range);
			goto restart;
		}
	}

	return range_alloc(asma, range, purged, pgstart, pgend);
}

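/*
 * A worked example of the range bookkeeping above (page numbers are made
 * up): unpinning [0, 9] creates one range; pinning [3, 5] then punches a
 * hole (case #4 in ashmem_pin), leaving unpinned ranges [0, 2] and [6, 9].
 * Unpinning [2, 7] afterwards merges both neighbors and the request back
 * into a single range [0, 9].
 */
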
/*
 * ashmem_get_pin_status - Returns ASHMEM_IS_UNPINNED if _any_ pages in the
 * given interval are unpinned and ASHMEM_IS_PINNED otherwise.
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_get_pin_status(struct ashmem_area *asma, size_t pgstart,
				 size_t pgend)
{
	struct ashmem_range *range;
	int ret = ASHMEM_IS_PINNED;

	list_for_each_entry(range, &asma->unpinned_list, unpinned) {
		if (range_before_page(range, pgstart))
			break;
		if (page_range_in_range(range, pgstart, pgend)) {
			ret = ASHMEM_IS_UNPINNED;
			break;
		}
	}

	return ret;
}

static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
			    void __user *p)
{
	struct ashmem_pin pin;
	size_t pgstart, pgend;
	int ret = -EINVAL;

	if (unlikely(copy_from_user(&pin, p, sizeof(pin))))
		return -EFAULT;

	mutex_lock(&ashmem_mutex);

	if (unlikely(!asma->file))
		goto out_unlock;

	/* per custom, you can pass zero for len to mean "everything onward" */
	if (!pin.len)
		pin.len = PAGE_ALIGN(asma->size) - pin.offset;

	if (unlikely((pin.offset | pin.len) & ~PAGE_MASK))
		goto out_unlock;

	if (unlikely(((__u32)-1) - pin.offset < pin.len))
		goto out_unlock;

	if (unlikely(PAGE_ALIGN(asma->size) < pin.offset + pin.len))
		goto out_unlock;

	pgstart = pin.offset / PAGE_SIZE;
	pgend = pgstart + (pin.len / PAGE_SIZE) - 1;

	switch (cmd) {
	case ASHMEM_PIN:
		ret = ashmem_pin(asma, pgstart, pgend);
		break;
	case ASHMEM_UNPIN:
		ret = ashmem_unpin(asma, pgstart, pgend);
		break;
	case ASHMEM_GET_PIN_STATUS:
		ret = ashmem_get_pin_status(asma, pgstart, pgend);
		break;
	}

out_unlock:
	mutex_unlock(&ashmem_mutex);

	return ret;
}

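/*
 * Hypothetical userspace view of the pin interface (a sketch; offsets and
 * lengths are illustrative and must be page-aligned, per the checks above):
 *
 *	struct ashmem_pin pin = { .offset = 0, .len = 0 };  // 0 len = "to end"
 *	ioctl(fd, ASHMEM_UNPIN, &pin);	// whole region becomes reclaimable
 *	// ...
 *	int ret = ioctl(fd, ASHMEM_PIN, &pin);
 *	if (ret == ASHMEM_WAS_PURGED)
 *		;	// contents were reclaimed; caller must regenerate them
 */
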
static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct ashmem_area *asma = file->private_data;
	long ret = -ENOTTY;

	switch (cmd) {
	case ASHMEM_SET_NAME:
		ret = set_name(asma, (void __user *)arg);
		break;
	case ASHMEM_GET_NAME:
		ret = get_name(asma, (void __user *)arg);
		break;
	case ASHMEM_SET_SIZE:
		ret = -EINVAL;
		mutex_lock(&ashmem_mutex);
		if (!asma->file) {
			ret = 0;
			asma->size = (size_t)arg;
		}
		mutex_unlock(&ashmem_mutex);
		break;
	case ASHMEM_GET_SIZE:
		ret = asma->size;
		break;
	case ASHMEM_SET_PROT_MASK:
		ret = set_prot_mask(asma, arg);
		break;
	case ASHMEM_GET_PROT_MASK:
		ret = asma->prot_mask;
		break;
	case ASHMEM_PIN:
	case ASHMEM_UNPIN:
	case ASHMEM_GET_PIN_STATUS:
		ret = ashmem_pin_unpin(asma, cmd, (void __user *)arg);
		break;
	case ASHMEM_PURGE_ALL_CACHES:
		ret = -EPERM;
		if (capable(CAP_SYS_ADMIN)) {
			struct shrink_control sc = {
				.gfp_mask = GFP_KERNEL,
				.nr_to_scan = LONG_MAX,
			};

			ret = ashmem_shrink_count(&ashmem_shrinker, &sc);
			ashmem_shrink_scan(&ashmem_shrinker, &sc);
		}
		break;
	}

	return ret;
}

/* support of 32bit userspace on 64bit platforms */
#ifdef CONFIG_COMPAT
static long compat_ashmem_ioctl(struct file *file, unsigned int cmd,
				unsigned long arg)
{
	switch (cmd) {
	case COMPAT_ASHMEM_SET_SIZE:
		cmd = ASHMEM_SET_SIZE;
		break;
	case COMPAT_ASHMEM_SET_PROT_MASK:
		cmd = ASHMEM_SET_PROT_MASK;
		break;
	}
	return ashmem_ioctl(file, cmd, arg);
}
#endif

static const struct file_operations ashmem_fops = {
	.owner = THIS_MODULE,
	.open = ashmem_open,
	.release = ashmem_release,
	.read = ashmem_read,
	.llseek = ashmem_llseek,
	.mmap = ashmem_mmap,
	.unlocked_ioctl = ashmem_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = compat_ashmem_ioctl,
#endif
};

static struct miscdevice ashmem_misc = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "ashmem",
	.fops = &ashmem_fops,
};

static int __init ashmem_init(void)
{
	int ret;

	ashmem_area_cachep = kmem_cache_create("ashmem_area_cache",
					       sizeof(struct ashmem_area),
					       0, 0, NULL);
	if (unlikely(!ashmem_area_cachep)) {
		pr_err("failed to create slab cache\n");
		return -ENOMEM;
	}

	ashmem_range_cachep = kmem_cache_create("ashmem_range_cache",
						sizeof(struct ashmem_range),
						0, 0, NULL);
	if (unlikely(!ashmem_range_cachep)) {
		pr_err("failed to create slab cache\n");
		return -ENOMEM;
	}

	ret = misc_register(&ashmem_misc);
	if (unlikely(ret)) {
		pr_err("failed to register misc device!\n");
		return ret;
	}

	register_shrinker(&ashmem_shrinker);

	pr_info("initialized\n");

	return 0;
}
device_initcall(ashmem_init);