 	if (!max_count)
 		return 0;
+	key = kmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
+	if (!key)
+		return -ENOMEM;
+
 	for (cp = 0; cp < max_count; cp++) {
-		key = __bpf_copy_key(keys + cp * map->key_size, map->key_size);
-		if (IS_ERR(key)) {
-			err = PTR_ERR(key);
+		err = -EFAULT;
+		if (copy_from_user(key, keys + cp * map->key_size,
+				   map->key_size))
 			break;
-		}
 		if (bpf_map_is_dev_bound(map)) {
 			err = bpf_map_offload_delete_elem(map, key);
 		}
 	if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
 		err = -EFAULT;
+
+	kfree(key);
 	return err;
 }
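
Both hunks follow the same pattern: the scratch key buffer (and, in the update path below, the value buffer) is allocated once with kmalloc() before the loop, each batch element is copied into it with copy_from_user() inside the loop, and the buffer is freed once with kfree() after the loop, replacing the per-iteration __bpf_copy_key()/kfree() round trip. A minimal userspace sketch of that pattern, with hypothetical names and plain malloc()/memcpy() standing in for kmalloc()/copy_from_user():

#include <errno.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical illustration only: allocate one scratch buffer up
 * front, reuse it for every element of the batch, free it once. */
static int delete_batch(const void *keys, size_t key_size, size_t max_count,
			int (*delete_one)(const void *key))
{
	void *key;
	size_t cp;
	int err = 0;

	if (!max_count)
		return 0;

	key = malloc(key_size);		/* once, not per element */
	if (!key)
		return -ENOMEM;

	for (cp = 0; cp < max_count; cp++) {
		/* stands in for copy_from_user(key, keys + cp * key_size, ...) */
		memcpy(key, (const char *)keys + cp * key_size, key_size);
		err = delete_one(key);
		if (err)
			break;
	}

	free(key);			/* single free after the loop */
	return err;
}
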
 	if (!max_count)
 		return 0;
+	key = kmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
+	if (!key)
+		return -ENOMEM;
+
 	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
-	if (!value)
+	if (!value) {
+		kfree(key);
 		return -ENOMEM;
+	}
 	for (cp = 0; cp < max_count; cp++) {
-		key = __bpf_copy_key(keys + cp * map->key_size, map->key_size);
-		if (IS_ERR(key)) {
-			err = PTR_ERR(key);
-			break;
-		}
 		err = -EFAULT;
-		if (copy_from_user(value, values + cp * value_size, value_size))
+		if (copy_from_user(key, keys + cp * map->key_size,
+				   map->key_size) ||
+		    copy_from_user(value, values + cp * value_size, value_size))
 			break;
 		err = bpf_map_update_value(map, f, key, value,