include/linux/rhashtable.h (+32 −26)

@@ -84,7 +84,7 @@ struct bucket_table {
 	struct lockdep_map	dep_map;
 
-	struct rhash_lock_head __rcu *buckets[] ____cacheline_aligned_in_smp;
+	struct rhash_lock_head *buckets[] ____cacheline_aligned_in_smp;
 };
 
 /*
@@ -261,11 +261,11 @@ void rhashtable_free_and_destroy(struct rhashtable *ht,
 				 void *arg);
 void rhashtable_destroy(struct rhashtable *ht);
 
-struct rhash_lock_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
+struct rhash_lock_head **rht_bucket_nested(const struct bucket_table *tbl,
 						 unsigned int hash);
-struct rhash_lock_head __rcu **__rht_bucket_nested(const struct bucket_table *tbl,
+struct rhash_lock_head **__rht_bucket_nested(const struct bucket_table *tbl,
 						   unsigned int hash);
-struct rhash_lock_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
+struct rhash_lock_head **rht_bucket_nested_insert(struct rhashtable *ht,
 							struct bucket_table *tbl,
 							unsigned int hash);
@@ -284,21 +284,21 @@ struct rhash_lock_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
 #define rht_entry(tpos, pos, member) \
 	({ tpos = container_of(pos, typeof(*tpos), member); 1; })
 
-static inline struct rhash_lock_head __rcu *const *rht_bucket(
+static inline struct rhash_lock_head *const *rht_bucket(
 	const struct bucket_table *tbl, unsigned int hash)
 {
 	return unlikely(tbl->nest) ? rht_bucket_nested(tbl, hash) :
 				     &tbl->buckets[hash];
 }
 
-static inline struct rhash_lock_head __rcu **rht_bucket_var(
+static inline struct rhash_lock_head **rht_bucket_var(
 	struct bucket_table *tbl, unsigned int hash)
 {
 	return unlikely(tbl->nest) ? __rht_bucket_nested(tbl, hash) :
 				     &tbl->buckets[hash];
 }
 
-static inline struct rhash_lock_head __rcu **rht_bucket_insert(
+static inline struct rhash_lock_head **rht_bucket_insert(
 	struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash)
 {
 	return unlikely(tbl->nest) ? rht_bucket_nested_insert(ht, tbl, hash) :
@@ -349,6 +349,12 @@ static inline void rht_unlock(struct bucket_table *tbl,
 	local_bh_enable();
 }
 
+static inline struct rhash_head __rcu *__rht_ptr(
+	struct rhash_lock_head *const *bkt)
+{
+	return (struct rhash_head __rcu *)((unsigned long)*bkt & ~BIT(0));
+}
+
 /*
  * Where 'bkt' is a bucket and might be locked:
  *   rht_ptr() dereferences that pointer and clears the lock bit.
@@ -356,30 +362,30 @@ static inline void rht_unlock(struct bucket_table *tbl,
  * access is guaranteed, such as when destroying the table.
  */
 static inline struct rhash_head *rht_ptr(
-	struct rhash_lock_head __rcu * const *bkt,
+	struct rhash_lock_head *const *bkt,
 	struct bucket_table *tbl,
 	unsigned int hash)
 {
-	const struct rhash_lock_head *p =
-		rht_dereference_bucket_rcu(*bkt, tbl, hash);
+	struct rhash_head __rcu *p = __rht_ptr(bkt);
 
-	if ((((unsigned long)p) & ~BIT(0)) == 0)
+	if (!p)
 		return RHT_NULLS_MARKER(bkt);
-	return (void *)(((unsigned long)p) & ~BIT(0));
+
+	return rht_dereference_bucket_rcu(p, tbl, hash);
 }
 
 static inline struct rhash_head *rht_ptr_exclusive(
-	struct rhash_lock_head __rcu * const *bkt)
+	struct rhash_lock_head *const *bkt)
 {
-	const struct rhash_lock_head *p =
-		rcu_dereference_protected(*bkt, 1);
+	struct rhash_head __rcu *p = __rht_ptr(bkt);
 
 	if (!p)
 		return RHT_NULLS_MARKER(bkt);
-	return (void *)(((unsigned long)p) & ~BIT(0));
+
+	return rcu_dereference_protected(p, 1);
 }
 
-static inline void rht_assign_locked(struct rhash_lock_head __rcu **bkt,
+static inline void rht_assign_locked(struct rhash_lock_head **bkt,
 				     struct rhash_head *obj)
 {
 	struct rhash_head __rcu **p = (struct rhash_head __rcu **)bkt;
@@ -390,7 +396,7 @@ static inline void rht_assign_locked(struct rhash_lock_head __rcu **bkt,
 }
 
 static inline void rht_assign_unlock(struct bucket_table *tbl,
-				     struct rhash_lock_head __rcu **bkt,
+				     struct rhash_lock_head **bkt,
 				     struct rhash_head *obj)
 {
 	struct rhash_head __rcu **p = (struct rhash_head __rcu **)bkt;
@@ -587,7 +593,7 @@ static inline struct rhash_head *__rhashtable_lookup(
 		.ht = ht,
 		.key = key,
 	};
-	struct rhash_lock_head __rcu * const *bkt;
+	struct rhash_lock_head *const *bkt;
 	struct bucket_table *tbl;
 	struct rhash_head *he;
 	unsigned int hash;
@@ -703,7 +709,7 @@ static inline void *__rhashtable_insert_fast(
 		.ht = ht,
 		.key = key,
 	};
-	struct rhash_lock_head __rcu **bkt;
+	struct rhash_lock_head **bkt;
 	struct rhash_head __rcu **pprev;
 	struct bucket_table *tbl;
 	struct rhash_head *head;
@@ -989,7 +995,7 @@ static inline int __rhashtable_remove_fast_one(
 	struct rhash_head *obj, const struct rhashtable_params params,
 	bool rhlist)
 {
-	struct rhash_lock_head __rcu **bkt;
+	struct rhash_lock_head **bkt;
 	struct rhash_head __rcu **pprev;
 	struct rhash_head *he;
 	unsigned int hash;
@@ -1141,7 +1147,7 @@ static inline int __rhashtable_replace_fast(
 	struct rhash_head *obj_old, struct rhash_head *obj_new,
 	const struct rhashtable_params params)
 {
-	struct rhash_lock_head __rcu **bkt;
+	struct rhash_lock_head **bkt;
 	struct rhash_head __rcu **pprev;
 	struct rhash_head *he;
 	unsigned int hash;

lib/rhashtable.c (+17 −16)

@@ -34,7 +34,7 @@
 union nested_table {
 	union nested_table __rcu *table;
-	struct rhash_lock_head __rcu *bucket;
+	struct rhash_lock_head *bucket;
 };
 
 static u32 head_hashfn(struct rhashtable *ht,
@@ -131,7 +131,7 @@ static union nested_table *nested_table_alloc(struct rhashtable *ht,
 			INIT_RHT_NULLS_HEAD(ntbl[i].bucket);
 	}
 
-	if (cmpxchg(prev, NULL, ntbl) == NULL)
+	if (cmpxchg((union nested_table **)prev, NULL, ntbl) == NULL)
 		return ntbl;
 	/* Raced with another thread. */
 	kfree(ntbl);
@@ -216,7 +216,7 @@ static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
 }
 
 static int rhashtable_rehash_one(struct rhashtable *ht,
-				 struct rhash_lock_head __rcu **bkt,
+				 struct rhash_lock_head **bkt,
 				 unsigned int old_hash)
 {
 	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
@@ -269,7 +269,7 @@ static int rhashtable_rehash_chain(struct rhashtable *ht,
 				    unsigned int old_hash)
 {
 	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
-	struct rhash_lock_head __rcu **bkt = rht_bucket_var(old_tbl, old_hash);
+	struct rhash_lock_head **bkt = rht_bucket_var(old_tbl, old_hash);
 	int err;
 
 	if (!bkt)
@@ -296,7 +296,8 @@ static int rhashtable_rehash_attach(struct rhashtable *ht,
 	 * rcu_assign_pointer().
 	 */
 
-	if (cmpxchg(&old_tbl->future_tbl, NULL, new_tbl) != NULL)
+	if (cmpxchg((struct bucket_table **)&old_tbl->future_tbl, NULL,
+		    new_tbl) != NULL)
 		return -EEXIST;
 
 	return 0;
@@ -478,7 +479,7 @@ static int rhashtable_insert_rehash(struct rhashtable *ht,
 }
 
 static void *rhashtable_lookup_one(struct rhashtable *ht,
-				   struct rhash_lock_head __rcu **bkt,
+				   struct rhash_lock_head **bkt,
 				   struct bucket_table *tbl, unsigned int hash,
 				   const void *key, struct rhash_head *obj)
 {
@@ -529,7 +530,7 @@ static void *rhashtable_lookup_one(struct rhashtable *ht,
 }
 
 static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
-						  struct rhash_lock_head __rcu **bkt,
+						  struct rhash_lock_head **bkt,
 						  struct bucket_table *tbl,
 						  unsigned int hash,
 						  struct rhash_head *obj,
@@ -584,7 +585,7 @@ static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
 {
 	struct bucket_table *new_tbl;
 	struct bucket_table *tbl;
-	struct rhash_lock_head __rcu **bkt;
+	struct rhash_lock_head **bkt;
 	unsigned int hash;
 	void *data;
@@ -1166,7 +1167,7 @@ void rhashtable_destroy(struct rhashtable *ht)
 }
 EXPORT_SYMBOL_GPL(rhashtable_destroy);
 
-struct rhash_lock_head __rcu **__rht_bucket_nested(const struct bucket_table *tbl,
+struct rhash_lock_head **__rht_bucket_nested(const struct bucket_table *tbl,
 						   unsigned int hash)
 {
 	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
@@ -1195,10 +1196,10 @@ struct rhash_lock_head __rcu **__rht_bucket_nested(const struct bucket_table *tb
 }
 EXPORT_SYMBOL_GPL(__rht_bucket_nested);
 
-struct rhash_lock_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
+struct rhash_lock_head **rht_bucket_nested(const struct bucket_table *tbl,
 						 unsigned int hash)
 {
-	static struct rhash_lock_head __rcu *rhnull;
+	static struct rhash_lock_head *rhnull;
 
 	if (!rhnull)
 		INIT_RHT_NULLS_HEAD(rhnull);
@@ -1206,7 +1207,7 @@ struct rhash_lock_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
 }
 EXPORT_SYMBOL_GPL(rht_bucket_nested);
 
-struct rhash_lock_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
+struct rhash_lock_head **rht_bucket_nested_insert(struct rhashtable *ht,
 							struct bucket_table *tbl,
 							unsigned int hash)
 {
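A note for review context: rhashtable packs a bit spinlock into bit 0 of each bucket-head pointer, which is why every dereference must mask that bit off first; the new __rht_ptr() helper in the header hunks centralises exactly that masking. Below is a minimal, self-contained userspace sketch of the pointer-packing idea, with no RCU and no real locking; demo_head, demo_ptr() and demo_assign_locked() are hypothetical stand-ins, not kernel API.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the kernel types: bit 0 of the stored
 * pointer value is the lock bit, as with rhash_lock_head. */
struct demo_head {
	struct demo_head *next;
};

#define LOCK_BIT 0x1UL

/* Mirrors __rht_ptr(): strip the lock bit to recover the real pointer. */
static struct demo_head *demo_ptr(struct demo_head *const *bkt)
{
	return (struct demo_head *)((uintptr_t)*bkt & ~LOCK_BIT);
}

/* Mirrors the spirit of rht_assign_locked(): publish a new head while
 * keeping the lock bit set.  (The kernel uses bit_spin_lock() and
 * rcu_assign_pointer(); plain stores stand in for them here.) */
static void demo_assign_locked(struct demo_head **bkt, struct demo_head *obj)
{
	*bkt = (struct demo_head *)((uintptr_t)obj | LOCK_BIT);
}

int main(void)
{
	struct demo_head node = { .next = NULL };
	struct demo_head *bucket = NULL;

	demo_assign_locked(&bucket, &node);
	printf("raw    %p\nmasked %p\nnode   %p\n",
	       (void *)bucket, (void *)demo_ptr(&bucket), (void *)&node);
	return 0;
}

The kernel versions additionally go through the __rcu/sparse machinery; the sketch only shows why *bkt cannot be dereferenced without first clearing BIT(0).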
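On the lib/rhashtable.c side, the two cmpxchg() call sites gain explicit casts, (union nested_table **) and (struct bucket_table **), presumably so that sparse's address-space checking stays quiet once the __rcu annotation is removed from one side of the comparison; the compare-and-exchange semantics are unchanged. As a hedged userspace analogue of what rhashtable_rehash_attach() does with future_tbl (try_install() is a hypothetical helper; GCC/Clang's __atomic_compare_exchange_n stands in for the kernel's cmpxchg()):

#include <stdio.h>

struct bucket_table {
	struct bucket_table *future_tbl;
};

/* Atomically install new_tbl into *slot if the slot is still NULL,
 * returning the previous value (NULL on success, the existing table
 * if another thread won the race). */
static struct bucket_table *try_install(struct bucket_table **slot,
					struct bucket_table *new_tbl)
{
	struct bucket_table *expected = NULL;

	__atomic_compare_exchange_n(slot, &expected, new_tbl, 0,
				    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
	return expected;
}

int main(void)
{
	struct bucket_table old_tbl = { .future_tbl = NULL };
	struct bucket_table new_tbl = { .future_tbl = NULL };

	if (try_install(&old_tbl.future_tbl, &new_tbl) == NULL)
		puts("installed new table");
	if (try_install(&old_tbl.future_tbl, &new_tbl) != NULL)
		puts("raced: future_tbl already set, would return -EEXIST");
	return 0;
}

The first call installs the new table; the second observes a non-NULL previous value and reports the race, matching the -EEXIST path in the patch.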