static DEFINE_MUTEX(pcp_batch_high_lock);
#define MIN_PERCPU_PAGELIST_HIGH_FRACTION (8)
-struct pagesets {
- local_lock_t lock;
-};
-static DEFINE_PER_CPU(struct pagesets, pagesets) = {
- .lock = INIT_LOCAL_LOCK(lock),
-};
-
#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
/*
 * On SMP, spin_trylock is sufficient protection.
 * On PREEMPT_RT, spin_trylock is equivalent on both SMP and UP.
 */
#define pcp_trylock_prepare(flags)	do { } while (0)
#define pcp_trylock_finish(flags)	do { } while (0)
#else
/* UP spin_trylock always succeeds so disable IRQs to prevent re-entrancy. */
#define pcp_trylock_prepare(flags)	local_irq_save(flags)
#define pcp_trylock_finish(flags)	local_irq_restore(flags)
#endif
+/*
+ * Locking a pcp requires a PCP lookup followed by a spinlock. To avoid a
+ * migration locking the wrong PCP and potentially allocating remote memory,
+ * pin the task to the CPU for the duration of the lookup+lock.
+ * preempt_disable is used on !RT because it is faster than migrate_disable.
+ * migrate_disable is used on RT because preempt_disable would interfere
+ * with RT spinlock usage and prevent a high-priority task from preempting
+ * the allocator.
+ */
+#ifndef CONFIG_PREEMPT_RT
+#define pcpu_task_pin() preempt_disable()
+#define pcpu_task_unpin() preempt_enable()
+#else
+#define pcpu_task_pin() migrate_disable()
+#define pcpu_task_unpin() migrate_enable()
+#endif
+
+/*
+ * Generic helpers to look up and lock a per-cpu variable with an embedded
+ * spinlock. The return value must be passed to the matching unlock helper.
+ */
+#define pcpu_spin_lock(type, member, ptr) \
+({ \
+ type *_ret; \
+ pcpu_task_pin(); \
+ _ret = this_cpu_ptr(ptr); \
+ spin_lock(&_ret->member); \
+ _ret; \
+})
+
+#define pcpu_spin_lock_irqsave(type, member, ptr, flags) \
+({ \
+ type *_ret; \
+ pcpu_task_pin(); \
+ _ret = this_cpu_ptr(ptr); \
+ spin_lock_irqsave(&_ret->member, flags); \
+ _ret; \
+})
+
+#define pcpu_spin_trylock_irqsave(type, member, ptr, flags) \
+({ \
+ type *_ret; \
+ pcpu_task_pin(); \
+ _ret = this_cpu_ptr(ptr); \
+ if (!spin_trylock_irqsave(&_ret->member, flags)) { \
+ pcpu_task_unpin(); \
+ _ret = NULL; \
+ } \
+ _ret; \
+})
+
+#define pcpu_spin_unlock(member, ptr) \
+({ \
+ spin_unlock(&ptr->member); \
+ pcpu_task_unpin(); \
+})
+
+#define pcpu_spin_unlock_irqrestore(member, ptr, flags) \
+({ \
+ spin_unlock_irqrestore(&ptr->member, flags); \
+ pcpu_task_unpin(); \
+})
+
+/* struct per_cpu_pages specific helpers. */
+#define pcp_spin_lock(ptr) \
+ pcpu_spin_lock(struct per_cpu_pages, lock, ptr)
+
+#define pcp_spin_lock_irqsave(ptr, flags) \
+ pcpu_spin_lock_irqsave(struct per_cpu_pages, lock, ptr, flags)
+
+#define pcp_spin_trylock_irqsave(ptr, flags) \
+ pcpu_spin_trylock_irqsave(struct per_cpu_pages, lock, ptr, flags)
+
+#define pcp_spin_unlock(ptr) \
+ pcpu_spin_unlock(lock, ptr)
+
+#define pcp_spin_unlock_irqrestore(ptr, flags) \
+ pcpu_spin_unlock_irqrestore(lock, ptr, flags)
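
For orientation, a minimal sketch of how these helpers are meant to be paired (illustrative only; the wrapper function name is invented, and the real call sites are the converted hunks below). The lock side pins the task, looks up this CPU's pcp and takes pcp->lock with IRQs disabled; the unlock side reverses all three steps.

	/* Illustrative sketch, not part of the patch. */
	static void pcp_lock_usage_sketch(struct zone *zone)
	{
		struct per_cpu_pages *pcp;
		unsigned long flags;

		/* Pin the task, look up this CPU's pcp, take pcp->lock with IRQs off. */
		pcp = pcp_spin_lock_irqsave(zone->per_cpu_pageset, flags);

		/* ... operate on pcp->lists ... */

		/* Drop the lock, restore IRQs, unpin the task. */
		pcp_spin_unlock_irqrestore(pcp, flags);
	}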
#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
DEFINE_PER_CPU(int, numa_node);
EXPORT_PER_CPU_SYMBOL(numa_node);
/* Ensure requested pindex is drained first. */
pindex = pindex - 1;
- /*
- * local_lock_irq held so equivalent to spin_lock_irqsave for
- * both PREEMPT_RT and non-PREEMPT_RT configurations.
- */
+ /* Caller must hold IRQ-safe pcp->lock so IRQs are disabled. */
spin_lock(&zone->lock);
isolated_pageblocks = has_isolate_pageblock(zone);
{
int i, allocated = 0;
- /*
- * local_lock_irq held so equivalent to spin_lock_irqsave for
- * both PREEMPT_RT and non-PREEMPT_RT configurations.
- */
+ /* Caller must hold IRQ-safe pcp->lock so IRQs are disabled. */
spin_lock(&zone->lock);
for (i = 0; i < count; ++i) {
struct page *page = __rmqueue(zone, order, migratetype,
migratetype = MIGRATE_MOVABLE;
}
- local_lock_irqsave(&pagesets.lock, flags);
zone = page_zone(page);
pcp_trylock_prepare(UP_flags);
- pcp = this_cpu_ptr(zone->per_cpu_pageset);
- if (spin_trylock(&pcp->lock)) {
+ pcp = pcp_spin_trylock_irqsave(zone->per_cpu_pageset, flags);
+ if (pcp) {
free_unref_page_commit(zone, pcp, page, migratetype, order);
- spin_unlock(&pcp->lock);
+ pcp_spin_unlock_irqrestore(pcp, flags);
} else {
free_one_page(zone, page, pfn, order, migratetype, FPI_NONE);
}
pcp_trylock_finish(UP_flags);
- local_unlock_irqrestore(&pagesets.lock, flags);
}
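
The hunk above shows the trylock pattern the patch uses wherever blocking on the pcp lock is undesirable: wrap the attempt in pcp_trylock_prepare()/pcp_trylock_finish() so that on UP, where spin_trylock() always succeeds, IRQs are disabled to prevent re-entrancy, and fall back to the zone lock when the pcp lock is already held. A schematic sketch follows (illustrative only; the wrapper name is invented):

	static void pcp_trylock_usage_sketch(struct zone *zone)
	{
		unsigned long __maybe_unused UP_flags;
		unsigned long flags;
		struct per_cpu_pages *pcp;

		pcp_trylock_prepare(UP_flags);		/* no-op on SMP; IRQs off on UP */
		pcp = pcp_spin_trylock_irqsave(zone->per_cpu_pageset, flags);
		if (pcp) {
			/* Fast path: operate on the per-cpu lists. */
			pcp_spin_unlock_irqrestore(pcp, flags);
		} else {
			/* Lock held elsewhere; the trylock already unpinned the task. */
			/* Fall back to the buddy lists under zone->lock. */
		}
		pcp_trylock_finish(UP_flags);
	}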
/*
}
}
- local_lock_irqsave(&pagesets.lock, flags);
list_for_each_entry_safe(page, next, list, lru) {
struct zone *zone = page_zone(page);
/* Different zone, different pcp lock. */
if (zone != locked_zone) {
if (pcp)
- spin_unlock(&pcp->lock);
+ pcp_spin_unlock_irqrestore(pcp, flags);
+
locked_zone = zone;
- pcp = this_cpu_ptr(zone->per_cpu_pageset);
- spin_lock(&pcp->lock);
+ pcp = pcp_spin_lock_irqsave(locked_zone->per_cpu_pageset, flags);
}
/*
* a large list of pages to free.
*/
if (++batch_count == SWAP_CLUSTER_MAX) {
- spin_unlock(&pcp->lock);
- local_unlock_irqrestore(&pagesets.lock, flags);
+ pcp_spin_unlock_irqrestore(pcp, flags);
batch_count = 0;
- local_lock_irqsave(&pagesets.lock, flags);
- pcp = this_cpu_ptr(locked_zone->per_cpu_pageset);
- spin_lock(&pcp->lock);
+ pcp = pcp_spin_lock_irqsave(locked_zone->per_cpu_pageset, flags);
}
}
if (pcp)
- spin_unlock(&pcp->lock);
- local_unlock_irqrestore(&pagesets.lock, flags);
+ pcp_spin_unlock_irqrestore(pcp, flags);
}
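
Because the list handled by the batched free path above can span several zones and be arbitrarily long, the pcp lock is dropped and retaken rather than held across the whole walk: once whenever the zone changes, and once every SWAP_CLUSTER_MAX pages to bound the IRQs-off section. A consolidated sketch of that flow (illustrative only; the wrapper name is invented and per-page bookkeeping is elided):

	static void pcp_list_free_sketch(struct list_head *list)
	{
		struct per_cpu_pages *pcp = NULL;
		struct zone *locked_zone = NULL;
		struct page *page, *next;
		unsigned long flags;
		int batch_count = 0;

		list_for_each_entry_safe(page, next, list, lru) {
			struct zone *zone = page_zone(page);

			/* Different zone, different pcp lock. */
			if (zone != locked_zone) {
				if (pcp)
					pcp_spin_unlock_irqrestore(pcp, flags);
				locked_zone = zone;
				pcp = pcp_spin_lock_irqsave(locked_zone->per_cpu_pageset, flags);
			}

			/* ... move the page onto the appropriate pcp list ... */

			/* Periodically drop the lock to bound the IRQs-off time. */
			if (++batch_count == SWAP_CLUSTER_MAX) {
				pcp_spin_unlock_irqrestore(pcp, flags);
				batch_count = 0;
				pcp = pcp_spin_lock_irqsave(locked_zone->per_cpu_pageset, flags);
			}
		}

		if (pcp)
			pcp_spin_unlock_irqrestore(pcp, flags);
	}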
/*
unsigned long flags;
unsigned long __maybe_unused UP_flags;
- local_lock_irqsave(&pagesets.lock, flags);
-
/*
* spin_trylock may fail due to a parallel drain. In the future, the
* trylock will also protect against IRQ reentrancy.
*/
- pcp = this_cpu_ptr(zone->per_cpu_pageset);
pcp_trylock_prepare(UP_flags);
- if (!spin_trylock(&pcp->lock)) {
+ pcp = pcp_spin_trylock_irqsave(zone->per_cpu_pageset, flags);
+ if (!pcp) {
pcp_trylock_finish(UP_flags);
- local_unlock_irqrestore(&pagesets.lock, flags);
return NULL;
}
pcp->free_factor >>= 1;
list = &pcp->lists[order_to_pindex(migratetype, order)];
page = __rmqueue_pcplist(zone, order, migratetype, alloc_flags, pcp, list);
- spin_unlock(&pcp->lock);
+ pcp_spin_unlock_irqrestore(pcp, flags);
pcp_trylock_finish(UP_flags);
- local_unlock_irqrestore(&pagesets.lock, flags);
if (page) {
__count_zid_vm_events(PGALLOC, page_zonenum(page), 1);
zone_statistics(preferred_zone, zone, 1);
goto failed;
/* Is a parallel drain in progress? */
- local_lock_irqsave(&pagesets.lock, flags);
pcp_trylock_prepare(UP_flags);
- pcp = this_cpu_ptr(zone->per_cpu_pageset);
- if (!spin_trylock(&pcp->lock))
+ pcp = pcp_spin_trylock_irqsave(zone->per_cpu_pageset, flags);
+ if (!pcp)
goto failed_irq;
/* Attempt the batch allocation */
if (unlikely(!page)) {
/* Try and allocate at least one page */
if (!nr_account) {
- spin_unlock(&pcp->lock);
+ pcp_spin_unlock_irqrestore(pcp, flags);
goto failed_irq;
}
break;
nr_populated++;
}
- spin_unlock(&pcp->lock);
+ pcp_spin_unlock_irqrestore(pcp, flags);
pcp_trylock_finish(UP_flags);
- local_unlock_irqrestore(&pagesets.lock, flags);
__count_zid_vm_events(PGALLOC, zone_idx(zone), nr_account);
zone_statistics(ac.preferred_zoneref->zone, zone, nr_account);
failed_irq:
pcp_trylock_finish(UP_flags);
- local_unlock_irqrestore(&pagesets.lock, flags);
failed:
page = __alloc_pages(gfp, 0, preferred_nid, nodemask);