/*
 * fs/kernfs/file.c - kernfs file implementation
 *
 * Copyright (c) 2001-3 Patrick Mochel
 * Copyright (c) 2007 SUSE Linux Products GmbH
 * Copyright (c) 2007, 2013 Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2.
 */
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/pagemap.h>
#include <linux/sched.h>

#include "kernfs-internal.h"
/*
 * There's one kernfs_open_file for each open file and one kernfs_open_node
 * for each kernfs_node with one or more open files.
 *
 * kernfs_node->attr.open points to kernfs_open_node.  attr.open is
 * protected by kernfs_open_node_lock.
 *
 * filp->private_data points to seq_file whose ->private points to
 * kernfs_open_file.  kernfs_open_files are chained at
 * kernfs_open_node->files, which is protected by kernfs_open_file_mutex.
 */
static DEFINE_SPINLOCK(kernfs_open_node_lock);
static DEFINE_MUTEX(kernfs_open_file_mutex);
struct kernfs_open_node {
	atomic_t		refcnt;
	atomic_t		event;
	wait_queue_head_t	poll;
	struct list_head	files; /* goes through kernfs_open_file.list */
};
static struct kernfs_open_file *kernfs_of(struct file *file)
{
	return ((struct seq_file *)file->private_data)->private;
}
/*
 * Determine the kernfs_ops for the given kernfs_node.  This function must
 * be called while holding an active reference.
 */
static const struct kernfs_ops *kernfs_ops(struct kernfs_node *kn)
{
	if (kn->flags & KERNFS_LOCKDEP)
		lockdep_assert_held(kn);
	return kn->attr.ops;
}
/*
 * As kernfs_seq_stop() is also called after kernfs_seq_start() or
 * kernfs_seq_next() failure, it needs to distinguish whether it's stopping
 * a seq_file iteration which is fully initialized with an active reference
 * or an aborted kernfs_seq_start() due to get_active failure.  The
 * position pointer is the only context for each seq_file iteration and
 * thus the stop condition should be encoded in it.  As the return value is
 * directly visible to userland, ERR_PTR(-ENODEV) is the only acceptable
 * choice to indicate get_active failure.
 *
 * Unfortunately, this is complicated by the optional custom seq_file
 * operations, which may also return ERR_PTR(-ENODEV).  kernfs_seq_stop()
 * can't distinguish whether ERR_PTR(-ENODEV) came from get_active failure
 * or from a custom seq_file operation, so it can't decide from that value
 * alone whether put_active should be performed.
 *
 * This is worked around by factoring out the custom seq_stop() and
 * put_active part into kernfs_seq_stop_active(), skipping it from
 * kernfs_seq_stop() on ERR_PTR(-ENODEV) while invoking it directly after
 * custom seq_file operations fail with ERR_PTR(-ENODEV) - this ensures
 * that kernfs_seq_stop_active() is skipped only after get_active failure.
 */
static void kernfs_seq_stop_active(struct seq_file *sf, void *v)
{
	struct kernfs_open_file *of = sf->private;
	const struct kernfs_ops *ops = kernfs_ops(of->kn);

	if (ops->seq_stop)
		ops->seq_stop(sf, v);
	kernfs_put_active(of->kn);
}
static void *kernfs_seq_start(struct seq_file *sf, loff_t *ppos)
{
	struct kernfs_open_file *of = sf->private;
	const struct kernfs_ops *ops;

	/*
	 * @of->mutex nests outside active ref and is just to ensure that
	 * the ops aren't called concurrently for the same open file.
	 */
	mutex_lock(&of->mutex);
	if (!kernfs_get_active(of->kn))
		return ERR_PTR(-ENODEV);

	ops = kernfs_ops(of->kn);
	if (ops->seq_start) {
		void *next = ops->seq_start(sf, ppos);
		/* see the comment above kernfs_seq_stop_active() */
		if (next == ERR_PTR(-ENODEV))
			kernfs_seq_stop_active(sf, next);
		return next;
	} else {
		/*
		 * The same behavior and code as single_open().  Returns
		 * !NULL if pos is at the beginning; otherwise, NULL.
		 */
		return NULL + !*ppos;
	}
}
static void *kernfs_seq_next(struct seq_file *sf, void *v, loff_t *ppos)
{
	struct kernfs_open_file *of = sf->private;
	const struct kernfs_ops *ops = kernfs_ops(of->kn);

	if (ops->seq_next) {
		void *next = ops->seq_next(sf, v, ppos);
		/* see the comment above kernfs_seq_stop_active() */
		if (next == ERR_PTR(-ENODEV))
			kernfs_seq_stop_active(sf, next);
		return next;
	} else {
		/*
		 * The same behavior and code as single_open(): always
		 * terminate after the initial read.
		 */
		++*ppos;
		return NULL;
	}
}
static void kernfs_seq_stop(struct seq_file *sf, void *v)
{
	struct kernfs_open_file *of = sf->private;

	if (v != ERR_PTR(-ENODEV))
		kernfs_seq_stop_active(sf, v);
	mutex_unlock(&of->mutex);
}
static int kernfs_seq_show(struct seq_file *sf, void *v)
{
	struct kernfs_open_file *of = sf->private;

	of->event = atomic_read(&of->kn->attr.open->event);

	return of->kn->attr.ops->seq_show(sf, v);
}
static const struct seq_operations kernfs_seq_ops = {
	.start = kernfs_seq_start,
	.next = kernfs_seq_next,
	.stop = kernfs_seq_stop,
	.show = kernfs_seq_show,
};
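
/*
 * Illustrative sketch, not part of the original file: a minimal
 * kernfs_ops that serves a file through the seq_file path above.  The
 * names foo_seq_show and foo_ops are hypothetical.
 */
static int foo_seq_show(struct seq_file *sf, void *v)
{
	/* one record, single_open()-style; sf->private is the open file */
	seq_printf(sf, "%d\n", 42);
	return 0;
}

static const struct kernfs_ops foo_ops = {
	.seq_show = foo_seq_show,
};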
/*
 * As reading a bin file can have side-effects, the exact offset and bytes
 * specified in read(2) call should be passed to the read callback making
 * it difficult to use seq_file.  Implement simplistic custom buffering for
 * bin files.
 */
static ssize_t kernfs_file_direct_read(struct kernfs_open_file *of,
				       char __user *user_buf, size_t count,
				       loff_t *ppos)
{
	ssize_t len = min_t(size_t, count, PAGE_SIZE);
	const struct kernfs_ops *ops;
	char *buf;

	buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/*
	 * @of->mutex nests outside active ref and is just to ensure that
	 * the ops aren't called concurrently for the same open file.
	 */
	mutex_lock(&of->mutex);
	if (!kernfs_get_active(of->kn)) {
		len = -ENODEV;
		mutex_unlock(&of->mutex);
		goto out_free;
	}

	ops = kernfs_ops(of->kn);
	if (ops->read)
		len = ops->read(of, buf, len, *ppos);
	else
		len = -EINVAL;

	kernfs_put_active(of->kn);
	mutex_unlock(&of->mutex);

	if (len < 0)
		goto out_free;

	if (copy_to_user(user_buf, buf, len)) {
		len = -EFAULT;
		goto out_free;
	}

	*ppos += len;

 out_free:
	kfree(buf);
	return len;
}
/**
 * kernfs_fop_read - kernfs vfs read callback
 * @file: file pointer
 * @user_buf: userland buffer to read into
 * @count: number of bytes
 * @ppos: starting offset
 */
static ssize_t kernfs_fop_read(struct file *file, char __user *user_buf,
			       size_t count, loff_t *ppos)
{
	struct kernfs_open_file *of = kernfs_of(file);

	if (of->kn->flags & KERNFS_HAS_SEQ_SHOW)
		return seq_read(file, user_buf, count, ppos);
	else
		return kernfs_file_direct_read(of, user_buf, count, ppos);
}
/**
 * kernfs_fop_write - kernfs vfs write callback
 * @file: file pointer
 * @user_buf: data to write
 * @count: number of bytes
 * @ppos: starting offset
 *
 * Copy data in from userland and pass it to the matching kernfs write
 * operation.
 *
 * There is no easy way for us to know if userspace is only doing a partial
 * write, so we don't support them.  We expect the entire buffer to come on
 * the first write.  Hint: if you're writing a value, first read the file,
 * modify only the value you're changing, then write the entire buffer
 * back.
 */
static ssize_t kernfs_fop_write(struct file *file, const char __user *user_buf,
				size_t count, loff_t *ppos)
{
	struct kernfs_open_file *of = kernfs_of(file);
	const struct kernfs_ops *ops;
	size_t len;
	char *buf;

	if (of->atomic_write_len) {
		len = count;
		if (len > of->atomic_write_len)
			return -E2BIG;
	} else {
		len = min_t(size_t, count, PAGE_SIZE);
	}

	buf = kmalloc(len + 1, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, user_buf, len)) {
		len = -EFAULT;
		goto out_free;
	}
	buf[len] = '\0';	/* guarantee string termination */

	/*
	 * @of->mutex nests outside active ref and is just to ensure that
	 * the ops aren't called concurrently for the same open file.
	 */
	mutex_lock(&of->mutex);
	if (!kernfs_get_active(of->kn)) {
		mutex_unlock(&of->mutex);
		len = -ENODEV;
		goto out_free;
	}

	ops = kernfs_ops(of->kn);
	if (ops->write)
		len = ops->write(of, buf, len, *ppos);
	else
		len = -EINVAL;

	kernfs_put_active(of->kn);
	mutex_unlock(&of->mutex);

	if (len > 0)
		*ppos += len;

 out_free:
	kfree(buf);
	return len;
}
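
/*
 * Illustrative userspace sketch, not part of the original file, of the
 * read-modify-write pattern the comment above recommends (hypothetical
 * attribute path, error handling elided):
 *
 *	char buf[4096];
 *	int fd = open("/sys/devices/.../attr", O_RDWR);
 *	ssize_t len = pread(fd, buf, sizeof(buf) - 1, 0);
 *	buf[len] = '\0';
 *	... modify only the value being changed ...
 *	pwrite(fd, buf, len, 0);
 *
 * The final pwrite() hands the entire buffer over in a single write(2),
 * as the write path requires.
 */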
static void kernfs_vma_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct kernfs_open_file *of = kernfs_of(file);

	if (!of->vm_ops)
		return;

	if (!kernfs_get_active(of->kn))
		return;

	if (of->vm_ops->open)
		of->vm_ops->open(vma);

	kernfs_put_active(of->kn);
}
static int kernfs_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct file *file = vma->vm_file;
	struct kernfs_open_file *of = kernfs_of(file);
	int ret;

	if (!of->vm_ops)
		return VM_FAULT_SIGBUS;

	if (!kernfs_get_active(of->kn))
		return VM_FAULT_SIGBUS;

	ret = VM_FAULT_SIGBUS;
	if (of->vm_ops->fault)
		ret = of->vm_ops->fault(vma, vmf);

	kernfs_put_active(of->kn);
	return ret;
}
static int kernfs_vma_page_mkwrite(struct vm_area_struct *vma,
				   struct vm_fault *vmf)
{
	struct file *file = vma->vm_file;
	struct kernfs_open_file *of = kernfs_of(file);
	int ret;

	if (!of->vm_ops)
		return VM_FAULT_SIGBUS;

	if (!kernfs_get_active(of->kn))
		return VM_FAULT_SIGBUS;

	ret = 0;
	if (of->vm_ops->page_mkwrite)
		ret = of->vm_ops->page_mkwrite(vma, vmf);
	else
		file_update_time(file);

	kernfs_put_active(of->kn);
	return ret;
}
static int kernfs_vma_access(struct vm_area_struct *vma, unsigned long addr,
			     void *buf, int len, int write)
{
	struct file *file = vma->vm_file;
	struct kernfs_open_file *of = kernfs_of(file);
	int ret;

	if (!of->vm_ops)
		return -EINVAL;

	if (!kernfs_get_active(of->kn))
		return -EINVAL;

	ret = -EINVAL;
	if (of->vm_ops->access)
		ret = of->vm_ops->access(vma, addr, buf, len, write);

	kernfs_put_active(of->kn);
	return ret;
}
#ifdef CONFIG_NUMA
static int kernfs_vma_set_policy(struct vm_area_struct *vma,
				 struct mempolicy *new)
{
	struct file *file = vma->vm_file;
	struct kernfs_open_file *of = kernfs_of(file);
	int ret;

	if (!of->vm_ops)
		return 0;

	if (!kernfs_get_active(of->kn))
		return -EINVAL;

	ret = 0;
	if (of->vm_ops->set_policy)
		ret = of->vm_ops->set_policy(vma, new);

	kernfs_put_active(of->kn);
	return ret;
}
static struct mempolicy *kernfs_vma_get_policy(struct vm_area_struct *vma,
					       unsigned long addr)
{
	struct file *file = vma->vm_file;
	struct kernfs_open_file *of = kernfs_of(file);
	struct mempolicy *pol;

	if (!of->vm_ops)
		return vma->vm_policy;

	if (!kernfs_get_active(of->kn))
		return vma->vm_policy;

	pol = vma->vm_policy;
	if (of->vm_ops->get_policy)
		pol = of->vm_ops->get_policy(vma, addr);

	kernfs_put_active(of->kn);
	return pol;
}
static int kernfs_vma_migrate(struct vm_area_struct *vma,
			      const nodemask_t *from, const nodemask_t *to,
			      unsigned long flags)
{
	struct file *file = vma->vm_file;
	struct kernfs_open_file *of = kernfs_of(file);
	int ret;

	if (!of->vm_ops)
		return 0;

	if (!kernfs_get_active(of->kn))
		return 0;

	ret = 0;
	if (of->vm_ops->migrate)
		ret = of->vm_ops->migrate(vma, from, to, flags);

	kernfs_put_active(of->kn);
	return ret;
}
#endif
static const struct vm_operations_struct kernfs_vm_ops = {
	.open = kernfs_vma_open,
	.fault = kernfs_vma_fault,
	.page_mkwrite = kernfs_vma_page_mkwrite,
	.access = kernfs_vma_access,
#ifdef CONFIG_NUMA
	.set_policy = kernfs_vma_set_policy,
	.get_policy = kernfs_vma_get_policy,
	.migrate = kernfs_vma_migrate,
#endif
};
static int kernfs_fop_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct kernfs_open_file *of = kernfs_of(file);
	const struct kernfs_ops *ops;
	int rc;

	/*
	 * mmap path and of->mutex are prone to triggering spurious lockdep
	 * warnings and we don't want to add spurious locking dependency
	 * between the two.  Check whether mmap is actually implemented
	 * without grabbing @of->mutex by testing HAS_MMAP flag.  See the
	 * comment in kernfs_fop_open() for more details.
	 */
	if (!(of->kn->flags & KERNFS_HAS_MMAP))
		return -ENODEV;

	mutex_lock(&of->mutex);

	rc = -ENODEV;
	if (!kernfs_get_active(of->kn))
		goto out_unlock;

	ops = kernfs_ops(of->kn);
	rc = ops->mmap(of, vma);
	if (rc)
		goto out_put;

	/*
	 * PowerPC's pci_mmap of legacy_mem uses shmem_zero_setup()
	 * to satisfy versions of X which crash if the mmap fails: that
	 * substitutes a new vm_file, and we don't then want bin_vm_ops.
	 */
	if (vma->vm_file != file)
		goto out_put;

	rc = -EINVAL;
	if (of->mmapped && of->vm_ops != vma->vm_ops)
		goto out_put;

	/*
	 * It is not possible to successfully wrap close.
	 * So error if someone is trying to use close.
	 */
	if (vma->vm_ops && vma->vm_ops->close)
		goto out_put;

	rc = 0;
	of->mmapped = true;
	of->vm_ops = vma->vm_ops;
	vma->vm_ops = &kernfs_vm_ops;
 out_put:
	kernfs_put_active(of->kn);
 out_unlock:
	mutex_unlock(&of->mutex);

	return rc;
}
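
/*
 * Illustrative sketch, not part of the original file: what a
 * kernfs_ops->mmap callback looks like from the implementer's side.  The
 * names foo_mmap and foo_page are hypothetical.  The callback only sets
 * up the mapping; kernfs_fop_mmap() above then wraps whatever vm_ops the
 * callback installed with kernfs_vm_ops.
 */
static struct page *foo_page;	/* assume a page allocated elsewhere */

static int foo_mmap(struct kernfs_open_file *of, struct vm_area_struct *vma)
{
	if (vma->vm_end - vma->vm_start > PAGE_SIZE)
		return -EINVAL;
	/* must not install a vm_ops->close, see the check above */
	return vm_insert_page(vma, vma->vm_start, foo_page);
}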
/**
 * kernfs_get_open_node - get or create kernfs_open_node
 * @kn: target kernfs_node
 * @of: kernfs_open_file for this instance of open
 *
 * If @kn->attr.open exists, increment its reference count; otherwise,
 * create one.  @of is chained to the files list.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int kernfs_get_open_node(struct kernfs_node *kn,
				struct kernfs_open_file *of)
{
	struct kernfs_open_node *on, *new_on = NULL;

 retry:
	mutex_lock(&kernfs_open_file_mutex);
	spin_lock_irq(&kernfs_open_node_lock);

	if (!kn->attr.open && new_on) {
		kn->attr.open = new_on;
		new_on = NULL;
	}

	on = kn->attr.open;
	if (on) {
		atomic_inc(&on->refcnt);
		list_add_tail(&of->list, &on->files);
	}

	spin_unlock_irq(&kernfs_open_node_lock);
	mutex_unlock(&kernfs_open_file_mutex);

	if (on) {
		kfree(new_on);
		return 0;
	}

	/* not there, initialize a new one and retry */
	new_on = kmalloc(sizeof(*new_on), GFP_KERNEL);
	if (!new_on)
		return -ENOMEM;

	atomic_set(&new_on->refcnt, 0);
	atomic_set(&new_on->event, 1);
	init_waitqueue_head(&new_on->poll);
	INIT_LIST_HEAD(&new_on->files);
	goto retry;
}
/**
 * kernfs_put_open_node - put kernfs_open_node
 * @kn: target kernfs_node
 * @of: associated kernfs_open_file
 *
 * Put @kn->attr.open and unlink @of from the files list.  If
 * reference count reaches zero, disassociate and free it.
 */
static void kernfs_put_open_node(struct kernfs_node *kn,
				 struct kernfs_open_file *of)
{
	struct kernfs_open_node *on = kn->attr.open;
	unsigned long flags;

	mutex_lock(&kernfs_open_file_mutex);
	spin_lock_irqsave(&kernfs_open_node_lock, flags);

	if (of)
		list_del(&of->list);

	if (atomic_dec_and_test(&on->refcnt))
		kn->attr.open = NULL;
	else
		on = NULL;

	spin_unlock_irqrestore(&kernfs_open_node_lock, flags);
	mutex_unlock(&kernfs_open_file_mutex);

	kfree(on);
}
static int kernfs_fop_open(struct inode *inode, struct file *file)
{
	struct kernfs_node *kn = file->f_path.dentry->d_fsdata;
	const struct kernfs_ops *ops;
	struct kernfs_open_file *of;
	bool has_read, has_write, has_mmap;
	int error = -EACCES;

	if (!kernfs_get_active(kn))
		return -ENODEV;

	ops = kernfs_ops(kn);

	has_read = ops->seq_show || ops->read || ops->mmap;
	has_write = ops->write || ops->mmap;
	has_mmap = ops->mmap;

	/* check perms and supported operations */
	if ((file->f_mode & FMODE_WRITE) &&
	    (!(inode->i_mode & S_IWUGO) || !has_write))
		goto err_out;

	if ((file->f_mode & FMODE_READ) &&
	    (!(inode->i_mode & S_IRUGO) || !has_read))
		goto err_out;

	/* allocate a kernfs_open_file for the file */
	error = -ENOMEM;
	of = kzalloc(sizeof(struct kernfs_open_file), GFP_KERNEL);
	if (!of)
		goto err_out;

	/*
	 * The following is done to give a different lockdep key to
	 * @of->mutex for files which implement mmap.  This is a rather
	 * crude way to avoid false positive lockdep warning around
	 * mm->mmap_sem - mmap nests @of->mutex under mm->mmap_sem and
	 * reading /sys/block/sda/trace/act_mask grabs sr_mutex, under
	 * which mm->mmap_sem nests, while holding @of->mutex.  As each
	 * open file has a separate mutex, it's okay as long as those don't
	 * happen on the same file.  At this point, we can't easily give
	 * each file a separate locking class.  Let's differentiate on
	 * whether the file has mmap or not for now.
	 *
	 * Both paths of the branch look the same.  They're supposed to
	 * look that way and give @of->mutex different static lockdep keys.
	 */
	if (has_mmap)
		mutex_init(&of->mutex);
	else
		mutex_init(&of->mutex);

	of->kn = kn;
	of->file = file;

	/*
	 * Write path needs to check atomic_write_len outside active
	 * reference.  Cache it in open_file.  See kernfs_fop_write() for
	 * details.
	 */
	of->atomic_write_len = ops->atomic_write_len;

	/*
	 * Always instantiate seq_file even if read access doesn't use
	 * seq_file or is not requested.  This unifies private data access
	 * and readable regular files are the vast majority anyway.
	 */
	if (ops->seq_show)
		error = seq_open(file, &kernfs_seq_ops);
	else
		error = seq_open(file, NULL);
	if (error)
		goto err_free;

	((struct seq_file *)file->private_data)->private = of;

	/* seq_file clears PWRITE unconditionally, restore it if WRITE */
	if (file->f_mode & FMODE_WRITE)
		file->f_mode |= FMODE_PWRITE;

	/* make sure we have open node struct */
	error = kernfs_get_open_node(kn, of);
	if (error)
		goto err_close;

	/* open succeeded, put active references */
	kernfs_put_active(kn);
	return 0;

 err_close:
	seq_release(inode, file);
 err_free:
	kfree(of);
 err_out:
	kernfs_put_active(kn);
	return error;
}
static int kernfs_fop_release(struct inode *inode, struct file *filp)
{
	struct kernfs_node *kn = filp->f_path.dentry->d_fsdata;
	struct kernfs_open_file *of = kernfs_of(filp);

	kernfs_put_open_node(kn, of);
	seq_release(inode, filp);
	kfree(of);

	return 0;
}
void kernfs_unmap_bin_file(struct kernfs_node *kn)
{
	struct kernfs_open_node *on;
	struct kernfs_open_file *of;

	if (!(kn->flags & KERNFS_HAS_MMAP))
		return;

	spin_lock_irq(&kernfs_open_node_lock);
	on = kn->attr.open;
	if (on)
		atomic_inc(&on->refcnt);
	spin_unlock_irq(&kernfs_open_node_lock);
	if (!on)
		return;

	mutex_lock(&kernfs_open_file_mutex);
	list_for_each_entry(of, &on->files, list) {
		struct inode *inode = file_inode(of->file);
		unmap_mapping_range(inode->i_mapping, 0, 0, 1);
	}
	mutex_unlock(&kernfs_open_file_mutex);

	kernfs_put_open_node(kn, NULL);
}
/*
 * Kernfs attribute files are pollable.  The idea is that you read
 * the content and then you use 'poll' or 'select' to wait for
 * the content to change.  When the content changes (assuming the
 * manager for the kobject supports notification), poll will
 * return POLLERR|POLLPRI, and select will return the fd whether
 * it is waiting for read, write, or exceptions.
 * Once poll/select indicates that the value has changed, you
 * need to close and re-open the file, or seek to 0 and read again.
 * Reminder: this only works for attributes which actively support
 * it, and it is not possible to test an attribute from userspace
 * to see if it supports poll (neither 'poll' nor 'select' returns
 * an appropriate error code).  When in doubt, set a suitable timeout value.
 */
static unsigned int kernfs_fop_poll(struct file *filp, poll_table *wait)
{
	struct kernfs_open_file *of = kernfs_of(filp);
	struct kernfs_node *kn = filp->f_path.dentry->d_fsdata;
	struct kernfs_open_node *on = kn->attr.open;

	/* need parent for the kobj, grab both */
	if (!kernfs_get_active(kn))
		goto trigger;

	poll_wait(filp, &on->poll, wait);

	kernfs_put_active(kn);

	if (of->event != atomic_read(&on->event))
		goto trigger;

	return DEFAULT_POLLMASK;

 trigger:
	return DEFAULT_POLLMASK|POLLERR|POLLPRI;
}
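
/*
 * Illustrative userspace sketch, not part of the original file, of the
 * protocol described above (error handling elided):
 *
 *	char buf[4096];
 *	struct pollfd pfd = { .fd = fd, .events = POLLPRI };
 *
 *	pread(fd, buf, sizeof(buf), 0);    read the current value first
 *	poll(&pfd, 1, -1);                 blocks until kernfs_notify()
 *	pread(fd, buf, sizeof(buf), 0);    seek to 0 and read again
 */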
/**
 * kernfs_notify - notify a kernfs file
 * @kn: file to notify
 *
 * Notify @kn such that poll(2) on @kn wakes up.
 */
void kernfs_notify(struct kernfs_node *kn)
{
	struct kernfs_open_node *on;
	unsigned long flags;

	spin_lock_irqsave(&kernfs_open_node_lock, flags);

	if (!WARN_ON(kernfs_type(kn) != KERNFS_FILE)) {
		on = kn->attr.open;
		if (on) {
			atomic_inc(&on->event);
			wake_up_interruptible(&on->poll);
		}
	}

	spin_unlock_irqrestore(&kernfs_open_node_lock, flags);
}
EXPORT_SYMBOL_GPL(kernfs_notify);
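
/*
 * Illustrative sketch, not part of the original file: a driver whose
 * state backs a kernfs file calls kernfs_notify() after changing it so
 * that blocked poll(2)ers wake up.  foo_kn is hypothetical and would be
 * the node returned when the file was created.
 */
static struct kernfs_node *foo_kn;

static void foo_state_changed(void)
{
	/* bumps on->event and wakes the on->poll waitqueue */
	kernfs_notify(foo_kn);
}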
const struct file_operations kernfs_file_fops = {
	.read = kernfs_fop_read,
	.write = kernfs_fop_write,
	.llseek = generic_file_llseek,
	.mmap = kernfs_fop_mmap,
	.open = kernfs_fop_open,
	.release = kernfs_fop_release,
	.poll = kernfs_fop_poll,
};
/**
 * __kernfs_create_file - kernfs internal function to create a file
 * @parent: directory to create the file in
 * @name: name of the file
 * @mode: mode of the file
 * @size: size of the file
 * @ops: kernfs operations for the file
 * @priv: private data for the file
 * @ns: optional namespace tag of the file
 * @static_name: don't copy file name
 * @key: lockdep key for the file's active_ref, %NULL to disable lockdep
 *
 * Returns the created node on success, ERR_PTR() value on error.
 */
struct kernfs_node *__kernfs_create_file(struct kernfs_node *parent,
					 const char *name,
					 umode_t mode, loff_t size,
					 const struct kernfs_ops *ops,
					 void *priv, const void *ns,
					 bool static_name,
					 struct lock_class_key *key)
{
	struct kernfs_node *kn;
	unsigned flags;
	int rc;

	flags = KERNFS_FILE;
	if (static_name)
		flags |= KERNFS_STATIC_NAME;

	kn = kernfs_new_node(parent, name, (mode & S_IALLUGO) | S_IFREG, flags);
	if (!kn)
		return ERR_PTR(-ENOMEM);

	kn->attr.ops = ops;
	kn->attr.size = size;
	kn->ns = ns;
	kn->priv = priv;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	if (key) {
		lockdep_init_map(&kn->dep_map, "s_active", key, 0);
		kn->flags |= KERNFS_LOCKDEP;
	}
#endif

	/*
	 * kn->attr.ops is accessible only while holding active ref.  We
	 * need to know whether some ops are implemented outside active
	 * ref.  Cache their existence in flags.
	 */
	if (ops->seq_show)
		kn->flags |= KERNFS_HAS_SEQ_SHOW;
	if (ops->mmap)
		kn->flags |= KERNFS_HAS_MMAP;

	rc = kernfs_add_one(kn);
	if (rc) {
		kernfs_put(kn);
		return ERR_PTR(rc);
	}
	return kn;
}
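
/*
 * Illustrative sketch, not part of the original file: how a kernfs user
 * might create a file backed by the hypothetical foo_ops sketched after
 * kernfs_seq_ops above, mirroring what sysfs does internally.
 */
static struct lock_class_key foo_key;

static struct kernfs_node *foo_create_file(struct kernfs_node *parent)
{
	/* "foo" is a string literal, so passing static_name=true is safe */
	return __kernfs_create_file(parent, "foo", 0644, 0, &foo_ops,
				    NULL, NULL, true, &foo_key);
}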