void bpf_map_put(struct bpf_map *map);
int bpf_map_charge_memlock(struct bpf_map *map, u32 pages);
void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages);
-int bpf_map_charge_init(struct bpf_map_memory *mem, u32 pages);
+int bpf_map_charge_init(struct bpf_map_memory *mem, size_t size);
void bpf_map_charge_finish(struct bpf_map_memory *mem);
void bpf_map_charge_move(struct bpf_map_memory *dst,
			 struct bpf_map_memory *src);
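For orientation, this is how a map allocator is expected to use the reworked API once the size is expressed in bytes. A minimal sketch, assuming a hypothetical map type (example_map and example_map_alloc are illustrative names, not part of the patch):

struct example_map {
	struct bpf_map map;	/* generic part must come first */
	/* type-specific fields would follow */
};

static struct bpf_map *example_map_alloc(union bpf_attr *attr)
{
	struct bpf_map_memory mem;
	struct example_map *emap;
	u64 cost;
	int err;

	/* cost is in bytes now; the U32_MAX guard and the
	 * round_up()-to-pages conversion live inside
	 * bpf_map_charge_init() rather than at each callsite
	 */
	cost = sizeof(*emap) + (u64)attr->max_entries * attr->value_size;

	err = bpf_map_charge_init(&mem, cost);
	if (err)
		return ERR_PTR(err);

	emap = kzalloc(sizeof(*emap), GFP_USER);
	if (!emap) {
		/* allocation failed: release the memlock charge */
		bpf_map_charge_finish(&mem);
		return ERR_PTR(-ENOMEM);
	}

	/* success: move the charge into the map itself */
	bpf_map_charge_move(&emap->map.memory, &mem);
	return &emap->map;
}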
/* make sure there is no u32 overflow later in round_up() */
cost = array_size;
- if (cost >= U32_MAX - PAGE_SIZE)
- return ERR_PTR(-ENOMEM);
- if (percpu) {
+ if (percpu)
cost += (u64)attr->max_entries * elem_size * num_possible_cpus();
- if (cost >= U32_MAX - PAGE_SIZE)
- return ERR_PTR(-ENOMEM);
- }
- cost = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
ret = bpf_map_charge_init(&mem, cost);
if (ret < 0)
/* make sure page count doesn't overflow */
cost = (u64) cmap->map.max_entries * sizeof(struct bpf_cpu_map_entry *);
cost += cpu_map_bitmap_size(attr) * num_possible_cpus();
- if (cost >= U32_MAX - PAGE_SIZE)
- goto free_cmap;
/* Notice: returns -EPERM if map size is larger than memlock limit */
- ret = bpf_map_charge_init(&cmap->map.memory,
- round_up(cost, PAGE_SIZE) >> PAGE_SHIFT);
+ ret = bpf_map_charge_init(&cmap->map.memory, cost);
if (ret) {
err = ret;
goto free_cmap;
/* make sure page count doesn't overflow */
cost = (u64) dtab->map.max_entries * sizeof(struct bpf_dtab_netdev *);
cost += dev_map_bitmap_size(attr) * num_possible_cpus();
- if (cost >= U32_MAX - PAGE_SIZE)
- goto free_dtab;
/* if map size is larger than memlock limit, reject it */
- err = bpf_map_charge_init(&dtab->map.memory,
- round_up(cost, PAGE_SIZE) >> PAGE_SHIFT);
+ err = bpf_map_charge_init(&dtab->map.memory, cost);
if (err)
goto free_dtab;
else
cost += (u64) htab->elem_size * num_possible_cpus();
- if (cost >= U32_MAX - PAGE_SIZE)
- /* make sure page count doesn't overflow */
- goto free_htab;
-
/* if map size is larger than memlock limit, reject it */
- err = bpf_map_charge_init(&htab->map.memory,
- round_up(cost, PAGE_SIZE) >> PAGE_SHIFT);
+ err = bpf_map_charge_init(&htab->map.memory, cost);
if (err)
goto free_htab;
int numa_node = bpf_map_attr_numa_node(attr);
struct bpf_cgroup_storage_map *map;
struct bpf_map_memory mem;
- u32 pages;
int ret;
if (attr->key_size != sizeof(struct bpf_cgroup_storage_key))
return ERR_PTR(-EINVAL);

if (attr->max_entries)
/* max_entries is not used and enforced to be 0 */
return ERR_PTR(-EINVAL);
- pages = round_up(sizeof(struct bpf_cgroup_storage_map), PAGE_SIZE) >>
- PAGE_SHIFT;
- ret = bpf_map_charge_init(&mem, pages);
+ ret = bpf_map_charge_init(&mem, sizeof(struct bpf_cgroup_storage_map));
if (ret < 0)
return ERR_PTR(ret);
cost_per_node = sizeof(struct lpm_trie_node) +
attr->value_size + trie->data_size;
cost += (u64) attr->max_entries * cost_per_node;
- if (cost >= U32_MAX - PAGE_SIZE) {
- ret = -E2BIG;
- goto out_err;
- }
- ret = bpf_map_charge_init(&trie->map.memory,
- round_up(cost, PAGE_SIZE) >> PAGE_SHIFT);
+ ret = bpf_map_charge_init(&trie->map.memory, cost);
if (ret)
goto out_err;
size = (u64) attr->max_entries + 1;
cost = queue_size = sizeof(*qs) + size * attr->value_size;
- if (cost >= U32_MAX - PAGE_SIZE)
- return ERR_PTR(-E2BIG);
-
- cost = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
ret = bpf_map_charge_init(&mem, cost);
if (ret < 0)
int err, numa_node = bpf_map_attr_numa_node(attr);
struct reuseport_array *array;
struct bpf_map_memory mem;
- u64 cost, array_size;
+ u64 array_size;
if (!capable(CAP_SYS_ADMIN))
return ERR_PTR(-EPERM);
array_size = sizeof(*array);
array_size += (u64)attr->max_entries * sizeof(struct sock *);
- /* make sure there is no u32 overflow later in round_up() */
- cost = array_size;
- if (cost >= U32_MAX - PAGE_SIZE)
- return ERR_PTR(-ENOMEM);
- cost = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
-
- err = bpf_map_charge_init(&mem, cost);
+ err = bpf_map_charge_init(&mem, array_size);
if (err)
return ERR_PTR(err);
n_buckets = roundup_pow_of_two(attr->max_entries);
cost = n_buckets * sizeof(struct stack_map_bucket *) + sizeof(*smap);
- if (cost >= U32_MAX - PAGE_SIZE)
- return ERR_PTR(-E2BIG);
cost += n_buckets * (value_size + sizeof(struct stack_map_bucket));
- if (cost >= U32_MAX - PAGE_SIZE)
- return ERR_PTR(-E2BIG);
-
- err = bpf_map_charge_init(&mem,
- round_up(cost, PAGE_SIZE) >> PAGE_SHIFT);
+ err = bpf_map_charge_init(&mem, cost);
if (err)
return ERR_PTR(err);
atomic_long_sub(pages, &user->locked_vm);
}
-int bpf_map_charge_init(struct bpf_map_memory *mem, u32 pages)
+int bpf_map_charge_init(struct bpf_map_memory *mem, size_t size)
{
- struct user_struct *user = get_current_user();
+ u32 pages = round_up(size, PAGE_SIZE) >> PAGE_SHIFT;
+ struct user_struct *user;
int ret;
+ if (size >= U32_MAX - PAGE_SIZE)
+ return -E2BIG;
+
+ user = get_current_user();
ret = bpf_charge_memlock(user, pages);
if (ret) {
free_uid(user);
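To make the new conversion concrete, a worked example (assuming PAGE_SIZE = 4096 and PAGE_SHIFT = 12; the numbers are illustrative):

/* size = 5000 bytes: round_up(5000, 4096) = 8192, 8192 >> 12 = 2 pages */
u32 pages = round_up(5000, PAGE_SIZE) >> PAGE_SHIFT;

/* sizes at or above U32_MAX - PAGE_SIZE fail fast with -E2BIG,
 * before get_current_user() is taken or any memlock is charged
 */
int ret = bpf_map_charge_init(&mem, (size_t)U32_MAX);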
cost = (u64)m->map.max_entries * sizeof(struct xdp_sock *);
cost += sizeof(struct list_head) * num_possible_cpus();
- if (cost >= U32_MAX - PAGE_SIZE)
- goto free_m;
/* Notice: returns -EPERM if map size is larger than memlock limit */
- err = bpf_map_charge_init(&m->map.memory,
- round_up(cost, PAGE_SIZE) >> PAGE_SHIFT);
+ err = bpf_map_charge_init(&m->map.memory, cost);
if (err)
goto free_m;
struct bpf_sk_storage_map *smap;
unsigned int i;
u32 nbuckets;
- u32 pages;
u64 cost;
int ret;
smap->bucket_log = ilog2(roundup_pow_of_two(num_possible_cpus()));
nbuckets = 1U << smap->bucket_log;
cost = sizeof(*smap->buckets) * nbuckets + sizeof(*smap);
- pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
- ret = bpf_map_charge_init(&smap->map.memory, pages);
+ ret = bpf_map_charge_init(&smap->map.memory, cost);
if (ret < 0) {
kfree(smap);
return ERR_PTR(ret);
/* Make sure page count doesn't overflow. */
cost = (u64) stab->map.max_entries * sizeof(struct sock *);
- if (cost >= U32_MAX - PAGE_SIZE) {
- err = -EINVAL;
- goto free_stab;
- }
-
- err = bpf_map_charge_init(&stab->map.memory,
- round_up(cost, PAGE_SIZE) >> PAGE_SHIFT);
+ err = bpf_map_charge_init(&stab->map.memory, cost);
if (err)
goto free_stab;
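Taken together, every callsite collapses from the old open-coded pattern to a single byte-based call. A side-by-side sketch of the repeated transformation (condensed from the hunks above, not taken verbatim from any one file):

/* before: each map type checked for overflow and converted to pages */
if (cost >= U32_MAX - PAGE_SIZE)
	return ERR_PTR(-E2BIG);
err = bpf_map_charge_init(&map->memory,
			  round_up(cost, PAGE_SIZE) >> PAGE_SHIFT);

/* after: pass the size in bytes; the check and rounding are centralized */
err = bpf_map_charge_init(&map->memory, cost);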