struct list_head files; /* goes through kernfs_open_file.list */
};
-/**
- * attribute_to_node - get kernfs_node object corresponding to a kernfs attribute
- * @ptr: &struct kernfs_elem_attr
- * @type: struct kernfs_node
- * @member: name of member (i.e attr)
+/*
+ * kernfs_notify() may be called from any context and bounces notifications
+ * through a work item. To minimize space overhead in kernfs_node, the
+ * pending queue is implemented as a singly linked list of kernfs_nodes.
+ * The list is terminated with the self pointer so that whether a
+ * kernfs_node is on the list or not can be determined by testing the next
+ * pointer for NULL.
*/
-#define attribute_to_node(ptr, type, member) \
- container_of(ptr, type, member)
+#define KERNFS_NOTIFY_EOL ((void *)&kernfs_notify_list)
-static LLIST_HEAD(kernfs_notify_list);
+static DEFINE_SPINLOCK(kernfs_notify_lock);
+static struct kernfs_node *kernfs_notify_list = KERNFS_NOTIFY_EOL;
static inline struct mutex *kernfs_open_file_mutex_ptr(struct kernfs_node *kn)
{
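
The comment added in the hunk above is the core of the design: pending notifications are threaded through `attr.notify_next`, and the list is terminated by `KERNFS_NOTIFY_EOL` (the address of `kernfs_notify_list` itself) rather than by NULL, so a NULL `notify_next` always means "this node is not queued". Below is a minimal user-space sketch of that invariant; `struct node`, `pending_list`, `push()` and `pop()` are hypothetical names used for illustration, not kernfs code.

```c
/* Sketch only: illustrative names, not kernfs code. */
#include <stdio.h>

struct node {
	struct node *next;	/* NULL means "not on the pending list" */
	const char *name;
};

/* Sentinel: the list ends at the address of its own head variable. */
#define PENDING_EOL	((void *)&pending_list)

static struct node *pending_list = PENDING_EOL;

static void push(struct node *n)
{
	if (n->next)			/* non-NULL next: already queued */
		return;
	n->next = pending_list;
	pending_list = n;
}

static struct node *pop(void)
{
	struct node *n = pending_list;

	if (n == PENDING_EOL)		/* list is empty */
		return NULL;
	pending_list = n->next;
	n->next = NULL;			/* the node may be queued again now */
	return n;
}

int main(void)
{
	struct node a = { .name = "a" }, b = { .name = "b" };
	struct node *n;

	push(&a);
	push(&b);
	push(&a);			/* duplicate push is a no-op */
	while ((n = pop()) != NULL)
		printf("%s\n", n->name);	/* prints "b" then "a" */
	return 0;
}
```

The duplicate `push(&a)` is a no-op precisely because a queued node has a non-NULL `next`; kernfs_notify() relies on the same test in the last hunk below. The next hunk is the consumer side, kernfs_notify_workfn().
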
struct kernfs_node *kn;
struct kernfs_super_info *info;
struct kernfs_root *root;
- struct llist_node *free;
- struct kernfs_elem_attr *attr;
repeat:
/* pop one off the notify_list */
- free = llist_del_first(&kernfs_notify_list);
- if (free == NULL)
+ spin_lock_irq(&kernfs_notify_lock);
+ kn = kernfs_notify_list;
+ if (kn == KERNFS_NOTIFY_EOL) {
+ spin_unlock_irq(&kernfs_notify_lock);
return;
+ }
+ kernfs_notify_list = kn->attr.notify_next;
+ kn->attr.notify_next = NULL;
+ spin_unlock_irq(&kernfs_notify_lock);
- attr = llist_entry(free, struct kernfs_elem_attr, notify_next);
- kn = attribute_to_node(attr, struct kernfs_node, attr);
root = kernfs_root(kn);
/* kick fsnotify */
down_write(&root->kernfs_rwsem);
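
The hunk above reworks kernfs_notify_workfn(): it pops one node at a time under `kernfs_notify_lock`, resets `notify_next` to NULL so the node can be queued again, drops the lock, does the fsnotify work, and loops back to `repeat` until it finds `KERNFS_NOTIFY_EOL`. The sketch below mirrors that pop-one-then-repeat shape, with a pthread mutex standing in for the spinlock and a `process_node()` placeholder for the work done outside the lock; none of these names are kernfs APIs.

```c
/* Sketch only: a pthread mutex stands in for kernfs_notify_lock. */
#include <pthread.h>
#include <stdio.h>

struct node {
	struct node *next;	/* NULL means "not queued" */
	const char *name;
};

static pthread_mutex_t pending_lock = PTHREAD_MUTEX_INITIALIZER;

#define PENDING_EOL	((void *)&pending_list)

static struct node *pending_list = PENDING_EOL;

/* Stands in for the fsnotify work the real worker does per node. */
static void process_node(struct node *n)
{
	printf("notifying %s\n", n->name);
}

static void drain_pending(void)
{
	struct node *n;
repeat:
	/* pop one off the pending list */
	pthread_mutex_lock(&pending_lock);
	n = pending_list;
	if (n == PENDING_EOL) {
		pthread_mutex_unlock(&pending_lock);
		return;
	}
	pending_list = n->next;
	n->next = NULL;		/* producers may queue this node again now */
	pthread_mutex_unlock(&pending_lock);

	process_node(n);	/* the heavy work runs with the lock dropped */
	goto repeat;
}

int main(void)
{
	struct node a = { .name = "a" }, b = { .name = "b" };

	/* seed the list by hand: b -> a -> EOL */
	a.next = pending_list;
	pending_list = &a;
	b.next = pending_list;
	pending_list = &b;

	drain_pending();	/* prints "notifying b" then "notifying a" */
	return 0;
}
```

Doing the expensive part with the lock dropped keeps the critical section to a handful of pointer operations, which is what makes a plain spinlock cheap enough here. The final hunk below is the producer, kernfs_notify().
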
void kernfs_notify(struct kernfs_node *kn)
{
static DECLARE_WORK(kernfs_notify_work, kernfs_notify_workfn);
+ unsigned long flags;
struct kernfs_open_node *on;
if (WARN_ON(kernfs_type(kn) != KERNFS_FILE))
return;
- /* Because we are using llist for kernfs_notify_list */
- WARN_ON_ONCE(in_nmi());
-
/* kick poll immediately */
rcu_read_lock();
on = rcu_dereference(kn->attr.open);
if (on) {
	atomic_inc(&on->event);
	wake_up_interruptible(&on->poll);
}
rcu_read_unlock();
/* schedule work to kick fsnotify */
- kernfs_get(kn);
- llist_add(&kn->attr.notify_next, &kernfs_notify_list);
- schedule_work(&kernfs_notify_work);
+ spin_lock_irqsave(&kernfs_notify_lock, flags);
+ if (!kn->attr.notify_next) {
+ kernfs_get(kn);
+ kn->attr.notify_next = kernfs_notify_list;
+ kernfs_notify_list = kn;
+ schedule_work(&kernfs_notify_work);
+ }
+ spin_unlock_irqrestore(&kernfs_notify_lock, flags);
}
EXPORT_SYMBOL_GPL(kernfs_notify);
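
kernfs_notify() may run in any context, so it takes `kernfs_notify_lock` with `spin_lock_irqsave()` (the worker, which always runs in process context, can use plain `spin_lock_irq()`), and it only enqueues the node, takes a reference, and schedules the work item when `notify_next` is still NULL, i.e. when the node is not already queued. A rough user-space sketch of that check-then-queue step follows; `node_get()` and `kick_worker()` are illustrative stand-ins for `kernfs_get()` and `schedule_work()`, not real APIs.

```c
/* Sketch only: illustrative stand-ins, not kernfs APIs. */
#include <pthread.h>
#include <stdio.h>

struct node {
	struct node *next;	/* NULL means "not queued" */
	int refs;
	const char *name;
};

static pthread_mutex_t pending_lock = PTHREAD_MUTEX_INITIALIZER;

#define PENDING_EOL	((void *)&pending_list)

static struct node *pending_list = PENDING_EOL;

static void node_get(struct node *n)
{
	n->refs++;		/* stands in for kernfs_get() */
}

static void kick_worker(void)
{
	/* stands in for schedule_work(&kernfs_notify_work) */
}

static void notify(struct node *n)
{
	pthread_mutex_lock(&pending_lock);
	if (!n->next) {			/* not queued yet */
		node_get(n);		/* one reference per queueing */
		n->next = pending_list;
		pending_list = n;
		kick_worker();
	}
	pthread_mutex_unlock(&pending_lock);
}

int main(void)
{
	struct node a = { .refs = 1, .name = "a" };

	notify(&a);
	notify(&a);	/* second call is a no-op: still queued */
	printf("refs=%d\n", a.refs);	/* prints refs=2, not 3 */
	return 0;
}
```

Because the reference is taken only on the first enqueue, every node the worker pops carries exactly one extra reference to drop; the matching put happens in kernfs_notify_workfn(), outside the lines shown in this excerpt.
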