Lines matching "cpu" and "map" in kernel/bpf/hashtab.c

1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
47 * from sys_bpf(). BPF recursion is prevented by incrementing the per CPU
51 * by pinning the task to the current CPU and incrementing the recursion
52 * protection across the map operation.
73 * it is only safe to use raw spinlock for preallocated hash map on a RT kernel,
75 * after hash map was fully converted to use bpf_mem_alloc, there will be
76 * non-synchronous memory allocation for non-preallocated hash map, so it is
85 #define HASHTAB_MAP_LOCK_MASK (HASHTAB_MAP_LOCK_COUNT - 1)
88 struct bpf_map map; member
98 /* number of elements in non-preallocated hashtable are kept
124 /* pointer to per-cpu pointer */
134 return !(htab->map.map_flags & BPF_F_NO_PREALLOC); in htab_is_prealloc()
141 for (i = 0; i < htab->n_buckets; i++) { in htab_init_buckets()
142 INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i); in htab_init_buckets()
143 raw_spin_lock_init(&htab->buckets[i].raw_lock); in htab_init_buckets()
144 lockdep_set_class(&htab->buckets[i].raw_lock, in htab_init_buckets()
145 &htab->lockdep_key); in htab_init_buckets()
156 hash = hash & min_t(u32, HASHTAB_MAP_LOCK_MASK, htab->n_buckets - 1); in htab_lock_bucket()
160 if (unlikely(__this_cpu_inc_return(*(htab->map_locked[hash])) != 1)) { in htab_lock_bucket()
161 __this_cpu_dec(*(htab->map_locked[hash])); in htab_lock_bucket()
164 return -EBUSY; in htab_lock_bucket()
167 raw_spin_lock(&b->raw_lock); in htab_lock_bucket()
177 hash = hash & min_t(u32, HASHTAB_MAP_LOCK_MASK, htab->n_buckets - 1); in htab_unlock_bucket()
178 raw_spin_unlock(&b->raw_lock); in htab_unlock_bucket()
179 __this_cpu_dec(*(htab->map_locked[hash])); in htab_unlock_bucket()
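
The htab_lock_bucket()/htab_unlock_bucket() fragments above implement the recursion guard described in the header comment: a small array of per-CPU counters (map_locked), indexed by a few hash bits, is bumped before taking the raw bucket spinlock so that a re-entrant operation on the same CPU (e.g. a tracing program interrupting an in-flight map update) fails with -EBUSY instead of deadlocking. Below is a minimal user-space sketch of the same pattern, with _Thread_local and pthread spinlocks standing in for per-CPU data and raw_spinlock_t; the names are illustrative, not the kernel's.

#include <errno.h>
#include <pthread.h>

#define LOCK_COUNT 8				/* like HASHTAB_MAP_LOCK_COUNT */
#define LOCK_MASK  (LOCK_COUNT - 1)

static pthread_spinlock_t bucket_lock[LOCK_COUNT];
static _Thread_local int lock_depth[LOCK_COUNT];	/* stands in for map_locked */

static void init_bucket_locks(void)
{
	for (int i = 0; i < LOCK_COUNT; i++)
		pthread_spin_init(&bucket_lock[i], PTHREAD_PROCESS_PRIVATE);
}

static int lock_bucket(unsigned int hash)
{
	unsigned int slot = hash & LOCK_MASK;

	/* non-zero depth means this thread already holds the slot: bail out */
	if (++lock_depth[slot] != 1) {
		--lock_depth[slot];
		return -EBUSY;
	}
	pthread_spin_lock(&bucket_lock[slot]);
	return 0;
}

static void unlock_bucket(unsigned int hash)
{
	unsigned int slot = hash & LOCK_MASK;

	pthread_spin_unlock(&bucket_lock[slot]);
	--lock_depth[slot];
}
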
188 return htab->map.map_type == BPF_MAP_TYPE_LRU_HASH || in htab_is_lru()
189 htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH; in htab_is_lru()
194 return htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH || in htab_is_percpu()
195 htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH; in htab_is_percpu()
201 *(void __percpu **)(l->key + key_size) = pptr; in htab_elem_set_ptr()
206 return *(void __percpu **)(l->key + key_size); in htab_elem_get_ptr()
209 static void *fd_htab_map_get_ptr(const struct bpf_map *map, struct htab_elem *l) in fd_htab_map_get_ptr() argument
211 return *(void **)(l->key + roundup(map->key_size, 8)); in fd_htab_map_get_ptr()
216 return (struct htab_elem *) (htab->elems + i * (u64)htab->elem_size); in get_htab_elem()
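
The accessors above all rely on the same element layout: the fixed struct htab_elem header, then the key padded to 8 bytes, then either the inline value (plain hash map), a void __percpu pointer (per-CPU hash), or the inner map pointer (hash-of-maps). A small illustrative helper mirroring the offset and size arithmetic used throughout this file; the example_* names and the hdr_size parameter (standing in for sizeof(struct htab_elem)) are mine, not kernel code.

#include <stddef.h>

/* offset of the value area, i.e. l->key + round_up(key_size, 8) */
static inline size_t example_value_offset(size_t key_size)
{
	return (key_size + 7) & ~(size_t)7;
}

/* per-element footprint, as computed later in htab_map_alloc() */
static inline size_t example_elem_size(size_t hdr_size, size_t key_size,
				       size_t value_size, int percpu)
{
	size_t sz = hdr_size + example_value_offset(key_size);

	/* per-CPU maps store only a pointer inline; others the padded value */
	return sz + (percpu ? sizeof(void *)
			    : ((value_size + 7) & ~(size_t)7));
}
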
226 u32 num_entries = htab->map.max_entries; in htab_free_prealloced_timers_and_wq()
236 if (btf_record_has_field(htab->map.record, BPF_TIMER)) in htab_free_prealloced_timers_and_wq()
237 bpf_obj_free_timer(htab->map.record, in htab_free_prealloced_timers_and_wq()
238 elem->key + round_up(htab->map.key_size, 8)); in htab_free_prealloced_timers_and_wq()
239 if (btf_record_has_field(htab->map.record, BPF_WORKQUEUE)) in htab_free_prealloced_timers_and_wq()
240 bpf_obj_free_workqueue(htab->map.record, in htab_free_prealloced_timers_and_wq()
241 elem->key + round_up(htab->map.key_size, 8)); in htab_free_prealloced_timers_and_wq()
248 u32 num_entries = htab->map.max_entries; in htab_free_prealloced_fields()
251 if (IS_ERR_OR_NULL(htab->map.record)) in htab_free_prealloced_fields()
260 void __percpu *pptr = htab_elem_get_ptr(elem, htab->map.key_size); in htab_free_prealloced_fields()
261 int cpu; in htab_free_prealloced_fields() local
263 for_each_possible_cpu(cpu) { in htab_free_prealloced_fields()
264 bpf_obj_free_fields(htab->map.record, per_cpu_ptr(pptr, cpu)); in htab_free_prealloced_fields()
268 bpf_obj_free_fields(htab->map.record, elem->key + round_up(htab->map.key_size, 8)); in htab_free_prealloced_fields()
282 for (i = 0; i < htab->map.max_entries; i++) { in htab_free_elems()
286 htab->map.key_size); in htab_free_elems()
291 bpf_map_area_free(htab->elems); in htab_free_elems()
296 * order is always lru_lock -> bucket_lock and this only happens in
308 struct bpf_lru_node *node = bpf_lru_pop_free(&htab->lru, hash); in prealloc_lru_pop()
312 bpf_map_inc_elem_count(&htab->map); in prealloc_lru_pop()
314 memcpy(l->key, key, htab->map.key_size); in prealloc_lru_pop()
323 u32 num_entries = htab->map.max_entries; in prealloc_init()
324 int err = -ENOMEM, i; in prealloc_init()
329 htab->elems = bpf_map_area_alloc((u64)htab->elem_size * num_entries, in prealloc_init()
330 htab->map.numa_node); in prealloc_init()
331 if (!htab->elems) in prealloc_init()
332 return -ENOMEM; in prealloc_init()
338 u32 size = round_up(htab->map.value_size, 8); in prealloc_init()
341 pptr = bpf_map_alloc_percpu(&htab->map, size, 8, in prealloc_init()
345 htab_elem_set_ptr(get_htab_elem(htab, i), htab->map.key_size, in prealloc_init()
352 err = bpf_lru_init(&htab->lru, in prealloc_init()
353 htab->map.map_flags & BPF_F_NO_COMMON_LRU, in prealloc_init()
354 offsetof(struct htab_elem, hash) - in prealloc_init()
359 err = pcpu_freelist_init(&htab->freelist); in prealloc_init()
365 bpf_lru_populate(&htab->lru, htab->elems, in prealloc_init()
367 htab->elem_size, num_entries); in prealloc_init()
369 pcpu_freelist_populate(&htab->freelist, in prealloc_init()
370 htab->elems + offsetof(struct htab_elem, fnode), in prealloc_init()
371 htab->elem_size, num_entries); in prealloc_init()
385 bpf_lru_destroy(&htab->lru); in prealloc_destroy()
387 pcpu_freelist_destroy(&htab->freelist); in prealloc_destroy()
394 int cpu; in alloc_extra_elems() local
396 pptr = bpf_map_alloc_percpu(&htab->map, sizeof(struct htab_elem *), 8, in alloc_extra_elems()
399 return -ENOMEM; in alloc_extra_elems()
401 for_each_possible_cpu(cpu) { in alloc_extra_elems()
402 l = pcpu_freelist_pop(&htab->freelist); in alloc_extra_elems()
407 *per_cpu_ptr(pptr, cpu) = l_new; in alloc_extra_elems()
409 htab->extra_elems = pptr; in alloc_extra_elems()
416 bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH || in htab_map_alloc_check()
417 attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH); in htab_map_alloc_check()
418 bool lru = (attr->map_type == BPF_MAP_TYPE_LRU_HASH || in htab_map_alloc_check()
419 attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH); in htab_map_alloc_check()
420 /* percpu_lru means each cpu has its own LRU list. in htab_map_alloc_check()
422 * the map's value itself is percpu. percpu_lru has in htab_map_alloc_check()
423 * nothing to do with the map's value. in htab_map_alloc_check()
425 bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU); in htab_map_alloc_check()
426 bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC); in htab_map_alloc_check()
427 bool zero_seed = (attr->map_flags & BPF_F_ZERO_SEED); in htab_map_alloc_check()
435 return -EPERM; in htab_map_alloc_check()
437 if (attr->map_flags & ~HTAB_CREATE_FLAG_MASK || in htab_map_alloc_check()
438 !bpf_map_flags_access_ok(attr->map_flags)) in htab_map_alloc_check()
439 return -EINVAL; in htab_map_alloc_check()
442 return -EINVAL; in htab_map_alloc_check()
445 return -ENOTSUPP; in htab_map_alloc_check()
448 return -EINVAL; in htab_map_alloc_check()
451 * value_size == 0 may be allowed in the future to use map as a set in htab_map_alloc_check()
453 if (attr->max_entries == 0 || attr->key_size == 0 || in htab_map_alloc_check()
454 attr->value_size == 0) in htab_map_alloc_check()
455 return -EINVAL; in htab_map_alloc_check()
457 if ((u64)attr->key_size + attr->value_size >= KMALLOC_MAX_SIZE - in htab_map_alloc_check()
462 * kmalloc-able later in htab_map_update_elem() in htab_map_alloc_check()
464 return -E2BIG; in htab_map_alloc_check()
465 /* percpu map value size is bound by PCPU_MIN_UNIT_SIZE */ in htab_map_alloc_check()
466 if (percpu && round_up(attr->value_size, 8) > PCPU_MIN_UNIT_SIZE) in htab_map_alloc_check()
467 return -E2BIG; in htab_map_alloc_check()
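
From user space, the flags and size limits validated in htab_map_alloc_check() are supplied at map creation time. A hedged libbpf sketch (libbpf >= 0.7; map names, sizes and error handling are illustrative) showing the flag semantics spelled out in the comment above: BPF_F_NO_COMMON_LRU gives each CPU its own LRU list but does not make the value per-CPU, and BPF_F_ZERO_SEED needs CAP_SYS_ADMIN.

#include <linux/bpf.h>
#include <bpf/bpf.h>

int create_example_maps(void)
{
	LIBBPF_OPTS(bpf_map_create_opts, lru_opts,
		    .map_flags = BPF_F_NO_COMMON_LRU);
	LIBBPF_OPTS(bpf_map_create_opts, htab_opts,
		    .map_flags = BPF_F_NO_PREALLOC | BPF_F_ZERO_SEED);
	int lru_fd, htab_fd;

	/* LRU hash with one LRU list per CPU; values are still shared */
	lru_fd = bpf_map_create(BPF_MAP_TYPE_LRU_HASH, "percpu_lru_list",
				sizeof(__u32), sizeof(__u64), 4096, &lru_opts);

	/* plain hash, allocated on demand, deterministic hash seed (root only) */
	htab_fd = bpf_map_create(BPF_MAP_TYPE_HASH, "htab_on_demand",
				 sizeof(__u32), sizeof(__u64), 4096, &htab_opts);

	return (lru_fd < 0 || htab_fd < 0) ? -1 : 0;
}
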
474 bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH || in htab_map_alloc()
475 attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH); in htab_map_alloc()
476 bool lru = (attr->map_type == BPF_MAP_TYPE_LRU_HASH || in htab_map_alloc()
477 attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH); in htab_map_alloc()
478 /* percpu_lru means each cpu has its own LRU list. in htab_map_alloc()
480 * the map's value itself is percpu. percpu_lru has in htab_map_alloc()
481 * nothing to do with the map's value. in htab_map_alloc()
483 bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU); in htab_map_alloc()
484 bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC); in htab_map_alloc()
490 return ERR_PTR(-ENOMEM); in htab_map_alloc()
492 lockdep_register_key(&htab->lockdep_key); in htab_map_alloc()
494 bpf_map_init_from_attr(&htab->map, attr); in htab_map_alloc()
497 /* ensure each CPU's lru list has >=1 elements. in htab_map_alloc()
501 htab->map.max_entries = roundup(attr->max_entries, in htab_map_alloc()
503 if (htab->map.max_entries < attr->max_entries) in htab_map_alloc()
504 htab->map.max_entries = rounddown(attr->max_entries, in htab_map_alloc()
509 * into UB on 32-bit arches, so check that first in htab_map_alloc()
511 err = -E2BIG; in htab_map_alloc()
512 if (htab->map.max_entries > 1UL << 31) in htab_map_alloc()
515 htab->n_buckets = roundup_pow_of_two(htab->map.max_entries); in htab_map_alloc()
517 htab->elem_size = sizeof(struct htab_elem) + in htab_map_alloc()
518 round_up(htab->map.key_size, 8); in htab_map_alloc()
520 htab->elem_size += sizeof(void *); in htab_map_alloc()
522 htab->elem_size += round_up(htab->map.value_size, 8); in htab_map_alloc()
525 if (htab->n_buckets > U32_MAX / sizeof(struct bucket)) in htab_map_alloc()
528 err = bpf_map_init_elem_count(&htab->map); in htab_map_alloc()
532 err = -ENOMEM; in htab_map_alloc()
533 htab->buckets = bpf_map_area_alloc(htab->n_buckets * in htab_map_alloc()
535 htab->map.numa_node); in htab_map_alloc()
536 if (!htab->buckets) in htab_map_alloc()
540 htab->map_locked[i] = bpf_map_alloc_percpu(&htab->map, in htab_map_alloc()
544 if (!htab->map_locked[i]) in htab_map_alloc()
548 if (htab->map.map_flags & BPF_F_ZERO_SEED) in htab_map_alloc()
549 htab->hashrnd = 0; in htab_map_alloc()
551 htab->hashrnd = get_random_u32(); in htab_map_alloc()
557 * htab->max_entries - cur_number_of_elems to be more than batch * num_online_cpus() in htab_map_alloc()
559 * hash map size is 10k, which means that a system with 64 cpus will fill in htab_map_alloc()
561 * define our own batch count as 32 then 10k hash map can be filled up to 80%: in htab_map_alloc()
562 * 10k - 8k > 32 _batch_ * 64 _cpus_ in htab_map_alloc()
563 * and __percpu_counter_compare() will still be fast. At that point hash map in htab_map_alloc()
564 * collisions will dominate its performance anyway. Assume that hash map filled in htab_map_alloc()
569 if (attr->max_entries / 2 > num_online_cpus() * PERCPU_COUNTER_BATCH) in htab_map_alloc()
570 htab->use_percpu_counter = true; in htab_map_alloc()
572 if (htab->use_percpu_counter) { in htab_map_alloc()
573 err = percpu_counter_init(&htab->pcount, 0, GFP_KERNEL); in htab_map_alloc()
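
Worked example of the heuristic above, using the numbers from the comment (PERCPU_COUNTER_BATCH is 32):

	64 online CPUs * 32 (batch)              = 2048 elements of per-CPU slack
	10k-entry map:  10240 / 2 = 5120 > 2048  -> use the percpu_counter;
	                __percpu_counter_compare() stays on the fast path until the
	                map is about 80% full (10240 - 8192 = 2048 remaining).
	1k-entry map:   512 <= 2048              -> fall back to the atomic_t count.
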
592 err = bpf_mem_alloc_init(&htab->ma, htab->elem_size, false); in htab_map_alloc()
596 err = bpf_mem_alloc_init(&htab->pcpu_ma, in htab_map_alloc()
597 round_up(htab->map.value_size, 8), true); in htab_map_alloc()
603 return &htab->map; in htab_map_alloc()
608 if (htab->use_percpu_counter) in htab_map_alloc()
609 percpu_counter_destroy(&htab->pcount); in htab_map_alloc()
611 free_percpu(htab->map_locked[i]); in htab_map_alloc()
612 bpf_map_area_free(htab->buckets); in htab_map_alloc()
613 bpf_mem_alloc_destroy(&htab->pcpu_ma); in htab_map_alloc()
614 bpf_mem_alloc_destroy(&htab->ma); in htab_map_alloc()
616 bpf_map_free_elem_count(&htab->map); in htab_map_alloc()
618 lockdep_unregister_key(&htab->lockdep_key); in htab_map_alloc()
632 return &htab->buckets[hash & (htab->n_buckets - 1)]; in __select_bucket()
637 return &__select_bucket(htab, hash)->head; in select_bucket()
648 if (l->hash == hash && !memcmp(&l->key, key, key_size)) in lookup_elem_raw()
667 if (l->hash == hash && !memcmp(&l->key, key, key_size)) in lookup_nulls_elem_raw()
670 if (unlikely(get_nulls_value(n) != (hash & (n_buckets - 1)))) in lookup_nulls_elem_raw()
681 static void *__htab_map_lookup_elem(struct bpf_map *map, void *key) in __htab_map_lookup_elem() argument
683 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in __htab_map_lookup_elem()
691 key_size = map->key_size; in __htab_map_lookup_elem()
693 hash = htab_map_hash(key, key_size, htab->hashrnd); in __htab_map_lookup_elem()
697 l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets); in __htab_map_lookup_elem()
702 static void *htab_map_lookup_elem(struct bpf_map *map, void *key) in htab_map_lookup_elem() argument
704 struct htab_elem *l = __htab_map_lookup_elem(map, key); in htab_map_lookup_elem()
707 return l->key + round_up(map->key_size, 8); in htab_map_lookup_elem()
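
These lookup paths are what a BPF program reaches through the bpf_map_lookup_elem()/bpf_map_update_elem() helpers. A minimal hash-map user is sketched below (CO-RE style C, compiled with clang -target bpf; the map name and tracepoint are illustrative).

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1024);
	__type(key, __u32);
	__type(value, __u64);
} counts SEC(".maps");

SEC("tracepoint/syscalls/sys_enter_write")
int count_writes(void *ctx)
{
	__u32 key = 0;
	__u64 one = 1, *val;

	val = bpf_map_lookup_elem(&counts, &key);	/* htab_map_lookup_elem() */
	if (val)
		__sync_fetch_and_add(val, 1);
	else
		bpf_map_update_elem(&counts, &key, &one, BPF_NOEXIST);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
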
716 * map->ops->map_lookup_elem
723 static int htab_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf) in htab_map_gen_lookup() argument
729 (void *(*)(struct bpf_map *map, void *key))NULL)); in htab_map_gen_lookup()
734 round_up(map->key_size, 8)); in htab_map_gen_lookup()
735 return insn - insn_buf; in htab_map_gen_lookup()
738 static __always_inline void *__htab_lru_map_lookup_elem(struct bpf_map *map, in __htab_lru_map_lookup_elem() argument
741 struct htab_elem *l = __htab_map_lookup_elem(map, key); in __htab_lru_map_lookup_elem()
745 bpf_lru_node_set_ref(&l->lru_node); in __htab_lru_map_lookup_elem()
746 return l->key + round_up(map->key_size, 8); in __htab_lru_map_lookup_elem()
752 static void *htab_lru_map_lookup_elem(struct bpf_map *map, void *key) in htab_lru_map_lookup_elem() argument
754 return __htab_lru_map_lookup_elem(map, key, true); in htab_lru_map_lookup_elem()
757 static void *htab_lru_map_lookup_elem_sys(struct bpf_map *map, void *key) in htab_lru_map_lookup_elem_sys() argument
759 return __htab_lru_map_lookup_elem(map, key, false); in htab_lru_map_lookup_elem_sys()
762 static int htab_lru_map_gen_lookup(struct bpf_map *map, in htab_lru_map_gen_lookup() argument
770 (void *(*)(struct bpf_map *map, void *key))NULL)); in htab_lru_map_gen_lookup()
783 round_up(map->key_size, 8)); in htab_lru_map_gen_lookup()
784 return insn - insn_buf; in htab_lru_map_gen_lookup()
791 void __percpu *pptr = htab_elem_get_ptr(elem, htab->map.key_size); in check_and_free_fields()
792 int cpu; in check_and_free_fields() local
794 for_each_possible_cpu(cpu) in check_and_free_fields()
795 bpf_obj_free_fields(htab->map.record, per_cpu_ptr(pptr, cpu)); in check_and_free_fields()
797 void *map_value = elem->key + round_up(htab->map.key_size, 8); in check_and_free_fields()
799 bpf_obj_free_fields(htab->map.record, map_value); in check_and_free_fields()
817 b = __select_bucket(htab, tgt_l->hash); in htab_lru_map_delete_node()
818 head = &b->head; in htab_lru_map_delete_node()
820 ret = htab_lock_bucket(htab, b, tgt_l->hash, &flags); in htab_lru_map_delete_node()
826 hlist_nulls_del_rcu(&l->hash_node); in htab_lru_map_delete_node()
827 bpf_map_dec_elem_count(&htab->map); in htab_lru_map_delete_node()
831 htab_unlock_bucket(htab, b, tgt_l->hash, flags); in htab_lru_map_delete_node()
839 static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key) in htab_map_get_next_key() argument
841 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in htab_map_get_next_key()
849 key_size = map->key_size; in htab_map_get_next_key()
854 hash = htab_map_hash(key, key_size, htab->hashrnd); in htab_map_get_next_key()
859 l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets); in htab_map_get_next_key()
865 next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_next_rcu(&l->hash_node)), in htab_map_get_next_key()
869 /* if next elem in this hash list is non-zero, just return it */ in htab_map_get_next_key()
870 memcpy(next_key, next_l->key, key_size); in htab_map_get_next_key()
875 i = hash & (htab->n_buckets - 1); in htab_map_get_next_key()
880 for (; i < htab->n_buckets; i++) { in htab_map_get_next_key()
888 memcpy(next_key, next_l->key, key_size); in htab_map_get_next_key()
894 return -ENOENT; in htab_map_get_next_key()
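
htab_map_get_next_key() is the kernel side of the classic user-space map walk: a NULL key starts at the first bucket, and each call returns the next key in hash order. A sketch using libbpf's syscall wrappers, assuming a hash map with u32 keys and u64 values:

#include <stdio.h>
#include <bpf/bpf.h>

static void dump_map(int map_fd)
{
	__u32 key, next_key;
	__u64 value;
	__u32 *cur = NULL;		/* NULL key -> start from the first element */

	while (bpf_map_get_next_key(map_fd, cur, &next_key) == 0) {
		if (bpf_map_lookup_elem(map_fd, &next_key, &value) == 0)
			printf("key %u -> %llu\n", next_key,
			       (unsigned long long)value);
		key = next_key;
		cur = &key;
	}
}
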
901 if (htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH) in htab_elem_free()
902 bpf_mem_cache_free(&htab->pcpu_ma, l->ptr_to_pptr); in htab_elem_free()
903 bpf_mem_cache_free(&htab->ma, l); in htab_elem_free()
908 struct bpf_map *map = &htab->map; in htab_put_fd_value() local
911 if (map->ops->map_fd_put_ptr) { in htab_put_fd_value()
912 ptr = fd_htab_map_get_ptr(map, l); in htab_put_fd_value()
913 map->ops->map_fd_put_ptr(map, ptr, true); in htab_put_fd_value()
919 if (htab->use_percpu_counter) in is_map_full()
920 return __percpu_counter_compare(&htab->pcount, htab->map.max_entries, in is_map_full()
922 return atomic_read(&htab->count) >= htab->map.max_entries; in is_map_full()
927 bpf_map_inc_elem_count(&htab->map); in inc_elem_count()
929 if (htab->use_percpu_counter) in inc_elem_count()
930 percpu_counter_add_batch(&htab->pcount, 1, PERCPU_COUNTER_BATCH); in inc_elem_count()
932 atomic_inc(&htab->count); in inc_elem_count()
937 bpf_map_dec_elem_count(&htab->map); in dec_elem_count()
939 if (htab->use_percpu_counter) in dec_elem_count()
940 percpu_counter_add_batch(&htab->pcount, -1, PERCPU_COUNTER_BATCH); in dec_elem_count()
942 atomic_dec(&htab->count); in dec_elem_count()
951 bpf_map_dec_elem_count(&htab->map); in free_htab_elem()
953 pcpu_freelist_push(&htab->freelist, &l->fnode); in free_htab_elem()
965 copy_map_value(&htab->map, this_cpu_ptr(pptr), value); in pcpu_copy_value()
967 u32 size = round_up(htab->map.value_size, 8); in pcpu_copy_value()
968 int off = 0, cpu; in pcpu_copy_value() local
970 for_each_possible_cpu(cpu) { in pcpu_copy_value()
971 copy_map_value_long(&htab->map, per_cpu_ptr(pptr, cpu), value + off); in pcpu_copy_value()
980 /* When not setting the initial value on all cpus, zero-fill element in pcpu_init_value()
987 int cpu; in pcpu_init_value() local
989 for_each_possible_cpu(cpu) { in pcpu_init_value()
990 if (cpu == current_cpu) in pcpu_init_value()
991 copy_map_value_long(&htab->map, per_cpu_ptr(pptr, cpu), value); in pcpu_init_value()
993 zero_map_value(&htab->map, per_cpu_ptr(pptr, cpu)); in pcpu_init_value()
1002 return htab->map.map_type == BPF_MAP_TYPE_HASH_OF_MAPS && in fd_htab_map_needs_adjust()
1011 u32 size = htab->map.value_size; in alloc_htab_elem()
1019 * use per-cpu extra elems to avoid freelist_pop/push in alloc_htab_elem()
1021 pl_new = this_cpu_ptr(htab->extra_elems); in alloc_htab_elem()
1027 l = __pcpu_freelist_pop(&htab->freelist); in alloc_htab_elem()
1029 return ERR_PTR(-E2BIG); in alloc_htab_elem()
1031 bpf_map_inc_elem_count(&htab->map); in alloc_htab_elem()
1036 /* when map is full and update() is replacing in alloc_htab_elem()
1041 return ERR_PTR(-E2BIG); in alloc_htab_elem()
1043 l_new = bpf_mem_cache_alloc(&htab->ma); in alloc_htab_elem()
1045 l_new = ERR_PTR(-ENOMEM); in alloc_htab_elem()
1050 memcpy(l_new->key, key, key_size); in alloc_htab_elem()
1055 /* alloc_percpu zero-fills */ in alloc_htab_elem()
1056 void *ptr = bpf_mem_cache_alloc(&htab->pcpu_ma); in alloc_htab_elem()
1059 bpf_mem_cache_free(&htab->ma, l_new); in alloc_htab_elem()
1060 l_new = ERR_PTR(-ENOMEM); in alloc_htab_elem()
1063 l_new->ptr_to_pptr = ptr; in alloc_htab_elem()
1073 memcpy(l_new->key + round_up(key_size, 8), value, size); in alloc_htab_elem()
1075 copy_map_value(&htab->map, in alloc_htab_elem()
1076 l_new->key + round_up(key_size, 8), in alloc_htab_elem()
1080 l_new->hash = hash; in alloc_htab_elem()
1092 return -EEXIST; in check_flags()
1096 return -ENOENT; in check_flags()
1102 static long htab_map_update_elem(struct bpf_map *map, void *key, void *value, in htab_map_update_elem() argument
1105 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in htab_map_update_elem()
1116 return -EINVAL; in htab_map_update_elem()
1121 key_size = map->key_size; in htab_map_update_elem()
1123 hash = htab_map_hash(key, key_size, htab->hashrnd); in htab_map_update_elem()
1126 head = &b->head; in htab_map_update_elem()
1129 if (unlikely(!btf_record_has_field(map->record, BPF_SPIN_LOCK))) in htab_map_update_elem()
1130 return -EINVAL; in htab_map_update_elem()
1133 htab->n_buckets); in htab_map_update_elem()
1139 copy_map_value_locked(map, in htab_map_update_elem()
1140 l_old->key + round_up(key_size, 8), in htab_map_update_elem()
1167 copy_map_value_locked(map, in htab_map_update_elem()
1168 l_old->key + round_up(key_size, 8), in htab_map_update_elem()
1177 /* all pre-allocated elements are in use or memory exhausted */ in htab_map_update_elem()
1185 hlist_nulls_add_head_rcu(&l_new->hash_node, head); in htab_map_update_elem()
1187 hlist_nulls_del_rcu(&l_old->hash_node); in htab_map_update_elem()
1189 /* l_old has already been stashed in htab->extra_elems, free in htab_map_update_elem()
1191 * save the old map pointer in htab of maps before unlock in htab_map_update_elem()
1196 if (map->ops->map_fd_put_ptr) in htab_map_update_elem()
1197 old_map_ptr = fd_htab_map_get_ptr(map, l_old); in htab_map_update_elem()
1204 map->ops->map_fd_put_ptr(map, old_map_ptr, true); in htab_map_update_elem()
1217 bpf_map_dec_elem_count(&htab->map); in htab_lru_push_free()
1218 bpf_lru_push_free(&htab->lru, &elem->lru_node); in htab_lru_push_free()
1221 static long htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value, in htab_lru_map_update_elem() argument
1224 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in htab_lru_map_update_elem()
1234 return -EINVAL; in htab_lru_map_update_elem()
1239 key_size = map->key_size; in htab_lru_map_update_elem()
1241 hash = htab_map_hash(key, key_size, htab->hashrnd); in htab_lru_map_update_elem()
1244 head = &b->head; in htab_lru_map_update_elem()
1253 return -ENOMEM; in htab_lru_map_update_elem()
1254 copy_map_value(&htab->map, in htab_lru_map_update_elem()
1255 l_new->key + round_up(map->key_size, 8), value); in htab_lru_map_update_elem()
1270 hlist_nulls_add_head_rcu(&l_new->hash_node, head); in htab_lru_map_update_elem()
1272 bpf_lru_node_set_ref(&l_new->lru_node); in htab_lru_map_update_elem()
1273 hlist_nulls_del_rcu(&l_old->hash_node); in htab_lru_map_update_elem()
1289 static long __htab_percpu_map_update_elem(struct bpf_map *map, void *key, in __htab_percpu_map_update_elem() argument
1293 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in __htab_percpu_map_update_elem()
1303 return -EINVAL; in __htab_percpu_map_update_elem()
1308 key_size = map->key_size; in __htab_percpu_map_update_elem()
1310 hash = htab_map_hash(key, key_size, htab->hashrnd); in __htab_percpu_map_update_elem()
1313 head = &b->head; in __htab_percpu_map_update_elem()
1326 /* per-cpu hash map can update value in-place */ in __htab_percpu_map_update_elem()
1336 hlist_nulls_add_head_rcu(&l_new->hash_node, head); in __htab_percpu_map_update_elem()
1344 static long __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key, in __htab_lru_percpu_map_update_elem() argument
1348 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in __htab_lru_percpu_map_update_elem()
1358 return -EINVAL; in __htab_lru_percpu_map_update_elem()
1363 key_size = map->key_size; in __htab_lru_percpu_map_update_elem()
1365 hash = htab_map_hash(key, key_size, htab->hashrnd); in __htab_lru_percpu_map_update_elem()
1368 head = &b->head; in __htab_lru_percpu_map_update_elem()
1378 return -ENOMEM; in __htab_lru_percpu_map_update_elem()
1392 bpf_lru_node_set_ref(&l_old->lru_node); in __htab_lru_percpu_map_update_elem()
1394 /* per-cpu hash map can update value in-place */ in __htab_lru_percpu_map_update_elem()
1400 hlist_nulls_add_head_rcu(&l_new->hash_node, head); in __htab_lru_percpu_map_update_elem()
1408 bpf_map_dec_elem_count(&htab->map); in __htab_lru_percpu_map_update_elem()
1409 bpf_lru_push_free(&htab->lru, &l_new->lru_node); in __htab_lru_percpu_map_update_elem()
1414 static long htab_percpu_map_update_elem(struct bpf_map *map, void *key, in htab_percpu_map_update_elem() argument
1417 return __htab_percpu_map_update_elem(map, key, value, map_flags, false); in htab_percpu_map_update_elem()
1420 static long htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key, in htab_lru_percpu_map_update_elem() argument
1423 return __htab_lru_percpu_map_update_elem(map, key, value, map_flags, in htab_lru_percpu_map_update_elem()
1428 static long htab_map_delete_elem(struct bpf_map *map, void *key) in htab_map_delete_elem() argument
1430 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in htab_map_delete_elem()
1441 key_size = map->key_size; in htab_map_delete_elem()
1443 hash = htab_map_hash(key, key_size, htab->hashrnd); in htab_map_delete_elem()
1445 head = &b->head; in htab_map_delete_elem()
1453 hlist_nulls_del_rcu(&l->hash_node); in htab_map_delete_elem()
1455 ret = -ENOENT; in htab_map_delete_elem()
1464 static long htab_lru_map_delete_elem(struct bpf_map *map, void *key) in htab_lru_map_delete_elem() argument
1466 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in htab_lru_map_delete_elem()
1477 key_size = map->key_size; in htab_lru_map_delete_elem()
1479 hash = htab_map_hash(key, key_size, htab->hashrnd); in htab_lru_map_delete_elem()
1481 head = &b->head; in htab_lru_map_delete_elem()
1490 hlist_nulls_del_rcu(&l->hash_node); in htab_lru_map_delete_elem()
1492 ret = -ENOENT; in htab_lru_map_delete_elem()
1507 for (i = 0; i < htab->n_buckets; i++) { in delete_all_elements()
1513 hlist_nulls_del_rcu(&l->hash_node); in delete_all_elements()
1525 for (i = 0; i < htab->n_buckets; i++) { in htab_free_malloced_timers_and_wq()
1532 if (btf_record_has_field(htab->map.record, BPF_TIMER)) in htab_free_malloced_timers_and_wq()
1533 bpf_obj_free_timer(htab->map.record, in htab_free_malloced_timers_and_wq()
1534 l->key + round_up(htab->map.key_size, 8)); in htab_free_malloced_timers_and_wq()
1535 if (btf_record_has_field(htab->map.record, BPF_WORKQUEUE)) in htab_free_malloced_timers_and_wq()
1536 bpf_obj_free_workqueue(htab->map.record, in htab_free_malloced_timers_and_wq()
1537 l->key + round_up(htab->map.key_size, 8)); in htab_free_malloced_timers_and_wq()
1544 static void htab_map_free_timers_and_wq(struct bpf_map *map) in htab_map_free_timers_and_wq() argument
1546 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in htab_map_free_timers_and_wq()
1549 if (btf_record_has_field(htab->map.record, BPF_TIMER | BPF_WORKQUEUE)) { in htab_map_free_timers_and_wq()
1557 /* Called when map->refcnt goes to zero, either from workqueue or from syscall */
1558 static void htab_map_free(struct bpf_map *map) in htab_map_free() argument
1560 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in htab_map_free()
1565 * There is no need to synchronize_rcu() here to protect map elements. in htab_map_free()
1579 bpf_map_free_elem_count(map); in htab_map_free()
1580 free_percpu(htab->extra_elems); in htab_map_free()
1581 bpf_map_area_free(htab->buckets); in htab_map_free()
1582 bpf_mem_alloc_destroy(&htab->pcpu_ma); in htab_map_free()
1583 bpf_mem_alloc_destroy(&htab->ma); in htab_map_free()
1584 if (htab->use_percpu_counter) in htab_map_free()
1585 percpu_counter_destroy(&htab->pcount); in htab_map_free()
1587 free_percpu(htab->map_locked[i]); in htab_map_free()
1588 lockdep_unregister_key(&htab->lockdep_key); in htab_map_free()
1592 static void htab_map_seq_show_elem(struct bpf_map *map, void *key, in htab_map_seq_show_elem() argument
1599 value = htab_map_lookup_elem(map, key); in htab_map_seq_show_elem()
1605 btf_type_seq_show(map->btf, map->btf_key_type_id, key, m); in htab_map_seq_show_elem()
1607 btf_type_seq_show(map->btf, map->btf_value_type_id, value, m); in htab_map_seq_show_elem()
1613 static int __htab_map_lookup_and_delete_elem(struct bpf_map *map, void *key, in __htab_map_lookup_and_delete_elem() argument
1617 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in __htab_map_lookup_and_delete_elem()
1625 key_size = map->key_size; in __htab_map_lookup_and_delete_elem()
1627 hash = htab_map_hash(key, key_size, htab->hashrnd); in __htab_map_lookup_and_delete_elem()
1629 head = &b->head; in __htab_map_lookup_and_delete_elem()
1637 ret = -ENOENT; in __htab_map_lookup_and_delete_elem()
1642 u32 roundup_value_size = round_up(map->value_size, 8); in __htab_map_lookup_and_delete_elem()
1644 int off = 0, cpu; in __htab_map_lookup_and_delete_elem() local
1647 for_each_possible_cpu(cpu) { in __htab_map_lookup_and_delete_elem()
1648 copy_map_value_long(&htab->map, value + off, per_cpu_ptr(pptr, cpu)); in __htab_map_lookup_and_delete_elem()
1649 check_and_init_map_value(&htab->map, value + off); in __htab_map_lookup_and_delete_elem()
1653 u32 roundup_key_size = round_up(map->key_size, 8); in __htab_map_lookup_and_delete_elem()
1656 copy_map_value_locked(map, value, l->key + in __htab_map_lookup_and_delete_elem()
1660 copy_map_value(map, value, l->key + in __htab_map_lookup_and_delete_elem()
1663 check_and_init_map_value(map, value); in __htab_map_lookup_and_delete_elem()
1665 hlist_nulls_del_rcu(&l->hash_node); in __htab_map_lookup_and_delete_elem()
1680 static int htab_map_lookup_and_delete_elem(struct bpf_map *map, void *key, in htab_map_lookup_and_delete_elem() argument
1683 return __htab_map_lookup_and_delete_elem(map, key, value, false, false, in htab_map_lookup_and_delete_elem()
1687 static int htab_percpu_map_lookup_and_delete_elem(struct bpf_map *map, in htab_percpu_map_lookup_and_delete_elem() argument
1691 return __htab_map_lookup_and_delete_elem(map, key, value, false, true, in htab_percpu_map_lookup_and_delete_elem()
1695 static int htab_lru_map_lookup_and_delete_elem(struct bpf_map *map, void *key, in htab_lru_map_lookup_and_delete_elem() argument
1698 return __htab_map_lookup_and_delete_elem(map, key, value, true, false, in htab_lru_map_lookup_and_delete_elem()
1702 static int htab_lru_percpu_map_lookup_and_delete_elem(struct bpf_map *map, in htab_lru_percpu_map_lookup_and_delete_elem() argument
1706 return __htab_map_lookup_and_delete_elem(map, key, value, true, true, in htab_lru_percpu_map_lookup_and_delete_elem()
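
These four wrappers back the BPF_MAP_LOOKUP_AND_DELETE_ELEM command for the plain, per-CPU, LRU and LRU per-CPU flavours. From user space the same path is reached through libbpf (u32 key / u64 value assumed):

#include <bpf/bpf.h>

/* atomically read the value and remove the element in one syscall */
static int pop_entry(int map_fd, __u32 key, __u64 *value)
{
	return bpf_map_lookup_and_delete_elem(map_fd, &key, value);
}
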
1711 __htab_map_lookup_and_delete_batch(struct bpf_map *map, in __htab_map_lookup_and_delete_batch() argument
1717 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in __htab_map_lookup_and_delete_batch()
1720 void __user *uvalues = u64_to_user_ptr(attr->batch.values); in __htab_map_lookup_and_delete_batch()
1721 void __user *ukeys = u64_to_user_ptr(attr->batch.keys); in __htab_map_lookup_and_delete_batch()
1722 void __user *ubatch = u64_to_user_ptr(attr->batch.in_batch); in __htab_map_lookup_and_delete_batch()
1734 elem_map_flags = attr->batch.elem_flags; in __htab_map_lookup_and_delete_batch()
1736 ((elem_map_flags & BPF_F_LOCK) && !btf_record_has_field(map->record, BPF_SPIN_LOCK))) in __htab_map_lookup_and_delete_batch()
1737 return -EINVAL; in __htab_map_lookup_and_delete_batch()
1739 map_flags = attr->batch.flags; in __htab_map_lookup_and_delete_batch()
1741 return -EINVAL; in __htab_map_lookup_and_delete_batch()
1743 max_count = attr->batch.count; in __htab_map_lookup_and_delete_batch()
1747 if (put_user(0, &uattr->batch.count)) in __htab_map_lookup_and_delete_batch()
1748 return -EFAULT; in __htab_map_lookup_and_delete_batch()
1752 return -EFAULT; in __htab_map_lookup_and_delete_batch()
1754 if (batch >= htab->n_buckets) in __htab_map_lookup_and_delete_batch()
1755 return -ENOENT; in __htab_map_lookup_and_delete_batch()
1757 key_size = htab->map.key_size; in __htab_map_lookup_and_delete_batch()
1758 roundup_key_size = round_up(htab->map.key_size, 8); in __htab_map_lookup_and_delete_batch()
1759 value_size = htab->map.value_size; in __htab_map_lookup_and_delete_batch()
1776 ret = -ENOMEM; in __htab_map_lookup_and_delete_batch()
1786 b = &htab->buckets[batch]; in __htab_map_lookup_and_delete_batch()
1787 head = &b->head; in __htab_map_lookup_and_delete_batch()
1807 if (bucket_cnt > (max_count - total)) { in __htab_map_lookup_and_delete_batch()
1809 ret = -ENOSPC; in __htab_map_lookup_and_delete_batch()
1837 memcpy(dst_key, l->key, key_size); in __htab_map_lookup_and_delete_batch()
1840 int off = 0, cpu; in __htab_map_lookup_and_delete_batch() local
1843 pptr = htab_elem_get_ptr(l, map->key_size); in __htab_map_lookup_and_delete_batch()
1844 for_each_possible_cpu(cpu) { in __htab_map_lookup_and_delete_batch()
1845 copy_map_value_long(&htab->map, dst_val + off, per_cpu_ptr(pptr, cpu)); in __htab_map_lookup_and_delete_batch()
1846 check_and_init_map_value(&htab->map, dst_val + off); in __htab_map_lookup_and_delete_batch()
1850 value = l->key + roundup_key_size; in __htab_map_lookup_and_delete_batch()
1851 if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) { in __htab_map_lookup_and_delete_batch()
1854 /* Actual value is the id of the inner map */ in __htab_map_lookup_and_delete_batch()
1855 map_id = map->ops->map_fd_sys_lookup_elem(*inner_map); in __htab_map_lookup_and_delete_batch()
1860 copy_map_value_locked(map, dst_val, value, in __htab_map_lookup_and_delete_batch()
1863 copy_map_value(map, dst_val, value); in __htab_map_lookup_and_delete_batch()
1865 check_and_init_map_value(map, dst_val); in __htab_map_lookup_and_delete_batch()
1868 hlist_nulls_del_rcu(&l->hash_node); in __htab_map_lookup_and_delete_batch()
1880 l->batch_flink = node_to_free; in __htab_map_lookup_and_delete_batch()
1892 node_to_free = node_to_free->batch_flink; in __htab_map_lookup_and_delete_batch()
1903 if (!bucket_cnt && (batch + 1 < htab->n_buckets)) { in __htab_map_lookup_and_delete_batch()
1914 ret = -EFAULT; in __htab_map_lookup_and_delete_batch()
1920 if (batch >= htab->n_buckets) { in __htab_map_lookup_and_delete_batch()
1921 ret = -ENOENT; in __htab_map_lookup_and_delete_batch()
1927 if (ret == -EFAULT) in __htab_map_lookup_and_delete_batch()
1931 ubatch = u64_to_user_ptr(attr->batch.out_batch); in __htab_map_lookup_and_delete_batch()
1933 put_user(total, &uattr->batch.count)) in __htab_map_lookup_and_delete_batch()
1934 ret = -EFAULT; in __htab_map_lookup_and_delete_batch()
1943 htab_percpu_map_lookup_batch(struct bpf_map *map, const union bpf_attr *attr, in htab_percpu_map_lookup_batch() argument
1946 return __htab_map_lookup_and_delete_batch(map, attr, uattr, false, in htab_percpu_map_lookup_batch()
1951 htab_percpu_map_lookup_and_delete_batch(struct bpf_map *map, in htab_percpu_map_lookup_and_delete_batch() argument
1955 return __htab_map_lookup_and_delete_batch(map, attr, uattr, true, in htab_percpu_map_lookup_and_delete_batch()
1960 htab_map_lookup_batch(struct bpf_map *map, const union bpf_attr *attr, in htab_map_lookup_batch() argument
1963 return __htab_map_lookup_and_delete_batch(map, attr, uattr, false, in htab_map_lookup_batch()
1968 htab_map_lookup_and_delete_batch(struct bpf_map *map, in htab_map_lookup_and_delete_batch() argument
1972 return __htab_map_lookup_and_delete_batch(map, attr, uattr, true, in htab_map_lookup_and_delete_batch()
1977 htab_lru_percpu_map_lookup_batch(struct bpf_map *map, in htab_lru_percpu_map_lookup_batch() argument
1981 return __htab_map_lookup_and_delete_batch(map, attr, uattr, false, in htab_lru_percpu_map_lookup_batch()
1986 htab_lru_percpu_map_lookup_and_delete_batch(struct bpf_map *map, in htab_lru_percpu_map_lookup_and_delete_batch() argument
1990 return __htab_map_lookup_and_delete_batch(map, attr, uattr, true, in htab_lru_percpu_map_lookup_and_delete_batch()
1995 htab_lru_map_lookup_batch(struct bpf_map *map, const union bpf_attr *attr, in htab_lru_map_lookup_batch() argument
1998 return __htab_map_lookup_and_delete_batch(map, attr, uattr, false, in htab_lru_map_lookup_batch()
2003 htab_lru_map_lookup_and_delete_batch(struct bpf_map *map, in htab_lru_map_lookup_and_delete_batch() argument
2007 return __htab_map_lookup_and_delete_batch(map, attr, uattr, true, in htab_lru_map_lookup_and_delete_batch()
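
All of the batch wrappers above funnel into __htab_map_lookup_and_delete_batch(), which walks buckets, copies up to attr->batch.count elements per call, and hands back an opaque bucket cursor in out_batch. A user-space sketch using libbpf's bpf_map_lookup_batch() (u32 keys / u64 values assumed; NULL opts and error handling kept minimal):

#include <errno.h>
#include <bpf/bpf.h>

static int dump_in_batches(int map_fd)
{
	__u32 keys[64];
	__u64 vals[64];
	__u32 batch = 0, count;
	void *in = NULL;		/* NULL on the first call */
	int err;

	do {
		count = 64;
		err = bpf_map_lookup_batch(map_fd, in, &batch,
					   keys, vals, &count, NULL);
		if (err && errno != ENOENT)
			return -errno;
		/* 'count' entries of keys[]/vals[] are valid here */
		in = &batch;
	} while (!err);			/* -1/ENOENT marks the end of the map */

	return 0;
}
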
2012 struct bpf_map *map; member
2014 void *percpu_value_buf; // non-zero means percpu hash
2023 const struct bpf_htab *htab = info->htab; in bpf_hash_map_seq_find_next()
2024 u32 skip_elems = info->skip_elems; in bpf_hash_map_seq_find_next()
2025 u32 bucket_id = info->bucket_id; in bpf_hash_map_seq_find_next()
2032 if (bucket_id >= htab->n_buckets) in bpf_hash_map_seq_find_next()
2040 n = rcu_dereference_raw(hlist_nulls_next_rcu(&prev_elem->hash_node)); in bpf_hash_map_seq_find_next()
2046 b = &htab->buckets[bucket_id++]; in bpf_hash_map_seq_find_next()
2051 for (i = bucket_id; i < htab->n_buckets; i++) { in bpf_hash_map_seq_find_next()
2052 b = &htab->buckets[i]; in bpf_hash_map_seq_find_next()
2056 head = &b->head; in bpf_hash_map_seq_find_next()
2059 info->bucket_id = i; in bpf_hash_map_seq_find_next()
2060 info->skip_elems = count; in bpf_hash_map_seq_find_next()
2070 info->bucket_id = i; in bpf_hash_map_seq_find_next()
2071 info->skip_elems = 0; in bpf_hash_map_seq_find_next()
2077 struct bpf_iter_seq_hash_map_info *info = seq->private; in bpf_hash_map_seq_start()
2091 struct bpf_iter_seq_hash_map_info *info = seq->private; in bpf_hash_map_seq_next()
2094 ++info->skip_elems; in bpf_hash_map_seq_next()
2100 struct bpf_iter_seq_hash_map_info *info = seq->private; in __bpf_hash_map_seq_show()
2103 struct bpf_map *map = info->map; in __bpf_hash_map_seq_show() local
2105 int ret = 0, off = 0, cpu; in __bpf_hash_map_seq_show() local
2113 ctx.map = info->map; in __bpf_hash_map_seq_show()
2115 roundup_key_size = round_up(map->key_size, 8); in __bpf_hash_map_seq_show()
2116 ctx.key = elem->key; in __bpf_hash_map_seq_show()
2117 if (!info->percpu_value_buf) { in __bpf_hash_map_seq_show()
2118 ctx.value = elem->key + roundup_key_size; in __bpf_hash_map_seq_show()
2120 roundup_value_size = round_up(map->value_size, 8); in __bpf_hash_map_seq_show()
2121 pptr = htab_elem_get_ptr(elem, map->key_size); in __bpf_hash_map_seq_show()
2122 for_each_possible_cpu(cpu) { in __bpf_hash_map_seq_show()
2123 copy_map_value_long(map, info->percpu_value_buf + off, in __bpf_hash_map_seq_show()
2124 per_cpu_ptr(pptr, cpu)); in __bpf_hash_map_seq_show()
2125 check_and_init_map_value(map, info->percpu_value_buf + off); in __bpf_hash_map_seq_show()
2128 ctx.value = info->percpu_value_buf; in __bpf_hash_map_seq_show()
2154 struct bpf_map *map = aux->map; in bpf_iter_init_hash_map() local
2158 if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || in bpf_iter_init_hash_map()
2159 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) { in bpf_iter_init_hash_map()
2160 buf_size = round_up(map->value_size, 8) * num_possible_cpus(); in bpf_iter_init_hash_map()
2163 return -ENOMEM; in bpf_iter_init_hash_map()
2165 seq_info->percpu_value_buf = value_buf; in bpf_iter_init_hash_map()
2168 bpf_map_inc_with_uref(map); in bpf_iter_init_hash_map()
2169 seq_info->map = map; in bpf_iter_init_hash_map()
2170 seq_info->htab = container_of(map, struct bpf_htab, map); in bpf_iter_init_hash_map()
2178 bpf_map_put_with_uref(seq_info->map); in bpf_iter_fini_hash_map()
2179 kfree(seq_info->percpu_value_buf); in bpf_iter_fini_hash_map()
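
The seq_file plumbing above is what backs "iter/bpf_map_elem" BPF iterators over hash maps. A minimal iterator program might look like the sketch below (it needs a bpftool-generated vmlinux.h; the target map is chosen at attach time by passing its fd in the iterator link options, and u32/u64 key/value types are assumed):

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

SEC("iter/bpf_map_elem")
int dump_elem(struct bpf_iter__bpf_map_elem *ctx)
{
	struct seq_file *seq = ctx->meta->seq;
	__u32 *key = ctx->key;
	__u64 *val = ctx->value;

	if (!key || !val)		/* NULL marks the end of the walk */
		return 0;

	BPF_SEQ_PRINTF(seq, "%u: %llu\n", *key, *val);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
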
2196 static long bpf_for_each_hash_elem(struct bpf_map *map, bpf_callback_t callback_fn, in bpf_for_each_hash_elem() argument
2199 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in bpf_for_each_hash_elem()
2214 return -EINVAL; in bpf_for_each_hash_elem()
2218 roundup_key_size = round_up(map->key_size, 8); in bpf_for_each_hash_elem()
2223 for (i = 0; i < htab->n_buckets; i++) { in bpf_for_each_hash_elem()
2224 b = &htab->buckets[i]; in bpf_for_each_hash_elem()
2226 head = &b->head; in bpf_for_each_hash_elem()
2228 key = elem->key; in bpf_for_each_hash_elem()
2230 /* current cpu value for percpu map */ in bpf_for_each_hash_elem()
2231 pptr = htab_elem_get_ptr(elem, map->key_size); in bpf_for_each_hash_elem()
2234 val = elem->key + roundup_key_size; in bpf_for_each_hash_elem()
2237 ret = callback_fn((u64)(long)map, (u64)(long)key, in bpf_for_each_hash_elem()
2239 /* return value: 0 - continue, 1 - stop and return */ in bpf_for_each_hash_elem()
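
bpf_for_each_hash_elem() implements the bpf_for_each_map_elem() helper for hash maps, and the callback's return value is interpreted exactly as the comment says. A sketch of the BPF-program side (u32/u64 map; the section name and map name are illustrative):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 128);
	__type(key, __u32);
	__type(value, __u64);
} totals SEC(".maps");

static long sum_cb(struct bpf_map *map, __u32 *key, __u64 *val, void *ctx)
{
	*(__u64 *)ctx += *val;
	return 0;			/* 0 = continue, 1 = stop */
}

SEC("tracepoint/syscalls/sys_enter_getpid")
int sum_all(void *ctx)
{
	__u64 sum = 0;

	bpf_for_each_map_elem(&totals, sum_cb, &sum, 0);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
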
2251 static u64 htab_map_mem_usage(const struct bpf_map *map) in htab_map_mem_usage() argument
2253 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in htab_map_mem_usage()
2254 u32 value_size = round_up(htab->map.value_size, 8); in htab_map_mem_usage()
2261 usage += sizeof(struct bucket) * htab->n_buckets; in htab_map_mem_usage()
2264 num_entries = map->max_entries; in htab_map_mem_usage()
2268 usage += htab->elem_size * num_entries; in htab_map_mem_usage()
2277 num_entries = htab->use_percpu_counter ? in htab_map_mem_usage()
2278 percpu_counter_sum(&htab->pcount) : in htab_map_mem_usage()
2279 atomic_read(&htab->count); in htab_map_mem_usage()
2280 usage += (htab->elem_size + LLIST_NODE_SZ) * num_entries; in htab_map_mem_usage()
2334 static void *htab_percpu_map_lookup_elem(struct bpf_map *map, void *key) in htab_percpu_map_lookup_elem() argument
2336 struct htab_elem *l = __htab_map_lookup_elem(map, key); in htab_percpu_map_lookup_elem()
2339 return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size)); in htab_percpu_map_lookup_elem()
2344 /* inline bpf_map_lookup_elem() call for per-CPU hashmap */
2345 static int htab_percpu_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf) in htab_percpu_map_gen_lookup() argument
2350 return -EOPNOTSUPP; in htab_percpu_map_gen_lookup()
2353 (void *(*)(struct bpf_map *map, void *key))NULL)); in htab_percpu_map_gen_lookup()
2357 offsetof(struct htab_elem, key) + map->key_size); in htab_percpu_map_gen_lookup()
2361 return insn - insn_buf; in htab_percpu_map_gen_lookup()
2364 static void *htab_percpu_map_lookup_percpu_elem(struct bpf_map *map, void *key, u32 cpu) in htab_percpu_map_lookup_percpu_elem() argument
2368 if (cpu >= nr_cpu_ids) in htab_percpu_map_lookup_percpu_elem()
2371 l = __htab_map_lookup_elem(map, key); in htab_percpu_map_lookup_percpu_elem()
2373 return per_cpu_ptr(htab_elem_get_ptr(l, map->key_size), cpu); in htab_percpu_map_lookup_percpu_elem()
2378 static void *htab_lru_percpu_map_lookup_elem(struct bpf_map *map, void *key) in htab_lru_percpu_map_lookup_elem() argument
2380 struct htab_elem *l = __htab_map_lookup_elem(map, key); in htab_lru_percpu_map_lookup_elem()
2383 bpf_lru_node_set_ref(&l->lru_node); in htab_lru_percpu_map_lookup_elem()
2384 return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size)); in htab_lru_percpu_map_lookup_elem()
2390 static void *htab_lru_percpu_map_lookup_percpu_elem(struct bpf_map *map, void *key, u32 cpu) in htab_lru_percpu_map_lookup_percpu_elem() argument
2394 if (cpu >= nr_cpu_ids) in htab_lru_percpu_map_lookup_percpu_elem()
2397 l = __htab_map_lookup_elem(map, key); in htab_lru_percpu_map_lookup_percpu_elem()
2399 bpf_lru_node_set_ref(&l->lru_node); in htab_lru_percpu_map_lookup_percpu_elem()
2400 return per_cpu_ptr(htab_elem_get_ptr(l, map->key_size), cpu); in htab_lru_percpu_map_lookup_percpu_elem()
2406 int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value) in bpf_percpu_hash_copy() argument
2410 int ret = -ENOENT; in bpf_percpu_hash_copy()
2411 int cpu, off = 0; in bpf_percpu_hash_copy() local
2414 /* per_cpu areas are zero-filled and bpf programs can only in bpf_percpu_hash_copy()
2418 size = round_up(map->value_size, 8); in bpf_percpu_hash_copy()
2420 l = __htab_map_lookup_elem(map, key); in bpf_percpu_hash_copy()
2423 /* We do not mark LRU map element here in order to not mess up in bpf_percpu_hash_copy()
2424 * eviction heuristics when user space does a map walk. in bpf_percpu_hash_copy()
2426 pptr = htab_elem_get_ptr(l, map->key_size); in bpf_percpu_hash_copy()
2427 for_each_possible_cpu(cpu) { in bpf_percpu_hash_copy()
2428 copy_map_value_long(map, value + off, per_cpu_ptr(pptr, cpu)); in bpf_percpu_hash_copy()
2429 check_and_init_map_value(map, value + off); in bpf_percpu_hash_copy()
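
bpf_percpu_hash_copy() is what the BPF_MAP_LOOKUP_ELEM syscall does for per-CPU hash maps: user space receives one 8-byte-aligned value slot per possible CPU. The matching read from user space might look like this (libbpf; u32 key and u64 per-CPU value assumed):

#include <stdlib.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>		/* libbpf_num_possible_cpus() */

static long long read_percpu_sum(int map_fd, __u32 key)
{
	int ncpu = libbpf_num_possible_cpus();
	__u64 *vals = calloc(ncpu, sizeof(*vals));
	long long sum = 0;

	if (!vals)
		return -1;
	if (bpf_map_lookup_elem(map_fd, &key, vals) == 0)
		for (int i = 0; i < ncpu; i++)
			sum += vals[i];
	free(vals);
	return sum;
}
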
2438 int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value, in bpf_percpu_hash_update() argument
2441 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in bpf_percpu_hash_update()
2446 ret = __htab_lru_percpu_map_update_elem(map, key, value, in bpf_percpu_hash_update()
2449 ret = __htab_percpu_map_update_elem(map, key, value, map_flags, in bpf_percpu_hash_update()
2456 static void htab_percpu_map_seq_show_elem(struct bpf_map *map, void *key, in htab_percpu_map_seq_show_elem() argument
2461 int cpu; in htab_percpu_map_seq_show_elem() local
2465 l = __htab_map_lookup_elem(map, key); in htab_percpu_map_seq_show_elem()
2471 btf_type_seq_show(map->btf, map->btf_key_type_id, key, m); in htab_percpu_map_seq_show_elem()
2473 pptr = htab_elem_get_ptr(l, map->key_size); in htab_percpu_map_seq_show_elem()
2474 for_each_possible_cpu(cpu) { in htab_percpu_map_seq_show_elem()
2475 seq_printf(m, "\tcpu%d: ", cpu); in htab_percpu_map_seq_show_elem()
2476 btf_type_seq_show(map->btf, map->btf_value_type_id, in htab_percpu_map_seq_show_elem()
2477 per_cpu_ptr(pptr, cpu), m); in htab_percpu_map_seq_show_elem()
2528 if (attr->value_size != sizeof(u32)) in fd_htab_map_alloc_check()
2529 return -EINVAL; in fd_htab_map_alloc_check()
2533 static void fd_htab_map_free(struct bpf_map *map) in fd_htab_map_free() argument
2535 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in fd_htab_map_free()
2541 for (i = 0; i < htab->n_buckets; i++) { in fd_htab_map_free()
2545 void *ptr = fd_htab_map_get_ptr(map, l); in fd_htab_map_free()
2547 map->ops->map_fd_put_ptr(map, ptr, false); in fd_htab_map_free()
2551 htab_map_free(map); in fd_htab_map_free()
2555 int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value) in bpf_fd_htab_map_lookup_elem() argument
2560 if (!map->ops->map_fd_sys_lookup_elem) in bpf_fd_htab_map_lookup_elem()
2561 return -ENOTSUPP; in bpf_fd_htab_map_lookup_elem()
2564 ptr = htab_map_lookup_elem(map, key); in bpf_fd_htab_map_lookup_elem()
2566 *value = map->ops->map_fd_sys_lookup_elem(READ_ONCE(*ptr)); in bpf_fd_htab_map_lookup_elem()
2568 ret = -ENOENT; in bpf_fd_htab_map_lookup_elem()
2575 int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file, in bpf_fd_htab_map_update_elem() argument
2582 ptr = map->ops->map_fd_get_ptr(map, map_file, ufd); in bpf_fd_htab_map_update_elem()
2587 * htab map, and the following rcu_read_lock() is only used to avoid in bpf_fd_htab_map_update_elem()
2591 ret = htab_map_update_elem(map, key, &ptr, map_flags); in bpf_fd_htab_map_update_elem()
2594 map->ops->map_fd_put_ptr(map, ptr, false); in bpf_fd_htab_map_update_elem()
2601 struct bpf_map *map, *inner_map_meta; in htab_of_map_alloc() local
2603 inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd); in htab_of_map_alloc()
2607 map = htab_map_alloc(attr); in htab_of_map_alloc()
2608 if (IS_ERR(map)) { in htab_of_map_alloc()
2610 return map; in htab_of_map_alloc()
2613 map->inner_map_meta = inner_map_meta; in htab_of_map_alloc()
2615 return map; in htab_of_map_alloc()
2618 static void *htab_of_map_lookup_elem(struct bpf_map *map, void *key) in htab_of_map_lookup_elem() argument
2620 struct bpf_map **inner_map = htab_map_lookup_elem(map, key); in htab_of_map_lookup_elem()
2628 static int htab_of_map_gen_lookup(struct bpf_map *map, in htab_of_map_gen_lookup() argument
2635 (void *(*)(struct bpf_map *map, void *key))NULL)); in htab_of_map_gen_lookup()
2640 round_up(map->key_size, 8)); in htab_of_map_gen_lookup()
2643 return insn - insn_buf; in htab_of_map_gen_lookup()
2646 static void htab_of_map_free(struct bpf_map *map) in htab_of_map_free() argument
2648 bpf_map_meta_free(map->inner_map_meta); in htab_of_map_free()
2649 fd_htab_map_free(map); in htab_of_map_free()