tools/lib/lockdep: Fix compilation for 4.11

 - More rcu stubs
 - New dummy headers due to the sched header split
 - jhash2 included, because the kernel lockdep code includes and uses it

Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: a.p.zijlstra@chello.nl
Cc: ben@decadent.org.uk
Link: http://lkml.kernel.org/r/20170525130005.5947-13-alexander.levin@verizon.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit 6c8e648385
parent 20fb654aef
Author:     Levin, Alexander (Sasha Levin)
AuthorDate: 2017-05-25 12:58:47 +00:00
Commit:     Ingo Molnar

7 files changed, 237 insertions(+), 3 deletions(-)

@@ -61,4 +61,14 @@ static inline unsigned fls_long(unsigned long l)
	return fls64(l);
}

/**
 * rol32 - rotate a 32-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u32 rol32(__u32 word, unsigned int shift)
{
	return (word << shift) | (word >> ((-shift) & 31));
}
#endif
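
The rotate helper added above is easy to sanity-check in isolation. A hypothetical standalone test program (not part of the patch; rol32_demo is a local copy of the helper):

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

/* Local copy of rol32(), for illustration only. */
static inline uint32_t rol32_demo(uint32_t word, unsigned int shift)
{
	return (word << shift) | (word >> ((-shift) & 31));
}

int main(void)
{
	/* The top bit wraps around to the bottom:
	 * 0x80000001 rotated left by 1 gives 0x00000003. */
	printf("0x%08" PRIx32 "\n", rol32_demo(0x80000001u, 1));
	/* The "(-shift) & 31" form keeps shift == 0 well defined
	 * (it avoids the undefined "word >> 32"). */
	printf("0x%08" PRIx32 "\n", rol32_demo(0xdeadbeefu, 0));
	return 0;
}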

tools/include/linux/jhash.h (new file)

@@ -0,0 +1,175 @@
#ifndef _LINUX_JHASH_H
#define _LINUX_JHASH_H

/* jhash.h: Jenkins hash support.
 *
 * Copyright (C) 2006. Bob Jenkins (bob_jenkins@burtleburtle.net)
 *
 * http://burtleburtle.net/bob/hash/
 *
 * These are the credits from Bob's sources:
 *
 * lookup3.c, by Bob Jenkins, May 2006, Public Domain.
 *
 * These are functions for producing 32-bit hashes for hash table lookup.
 * hashword(), hashlittle(), hashlittle2(), hashbig(), mix(), and final()
 * are externally useful functions. Routines to test the hash are included
 * if SELF_TEST is defined. You can use this free for any purpose. It's in
 * the public domain. It has no warranty.
 *
 * Copyright (C) 2009-2010 Jozsef Kadlecsik (kadlec@blackhole.kfki.hu)
 *
 * I've modified Bob's hash to be useful in the Linux kernel, and
 * any bugs present are my fault.
 * Jozsef
 */
#include <linux/bitops.h>
#include <linux/unaligned/packed_struct.h>

/* Best hash sizes are of power of two */
#define jhash_size(n) ((u32)1<<(n))
/* Mask the hash value, i.e (value & jhash_mask(n)) instead of (value % n) */
#define jhash_mask(n) (jhash_size(n)-1)

/* __jhash_mix -- mix 3 32-bit values reversibly. */
#define __jhash_mix(a, b, c) \
{ \
	a -= c; a ^= rol32(c, 4); c += b; \
	b -= a; b ^= rol32(a, 6); a += c; \
	c -= b; c ^= rol32(b, 8); b += a; \
	a -= c; a ^= rol32(c, 16); c += b; \
	b -= a; b ^= rol32(a, 19); a += c; \
	c -= b; c ^= rol32(b, 4); b += a; \
}

/* __jhash_final - final mixing of 3 32-bit values (a,b,c) into c */
#define __jhash_final(a, b, c) \
{ \
	c ^= b; c -= rol32(b, 14); \
	a ^= c; a -= rol32(c, 11); \
	b ^= a; b -= rol32(a, 25); \
	c ^= b; c -= rol32(b, 16); \
	a ^= c; a -= rol32(c, 4); \
	b ^= a; b -= rol32(a, 14); \
	c ^= b; c -= rol32(b, 24); \
}

/* An arbitrary initial parameter */
#define JHASH_INITVAL 0xdeadbeef

/* jhash - hash an arbitrary key
 * @k: sequence of bytes as key
 * @length: the length of the key
 * @initval: the previous hash, or an arbitrary value
 *
 * The generic version, hashes an arbitrary sequence of bytes.
 * No alignment or length assumptions are made about the input key.
 *
 * Returns the hash value of the key. The result depends on endianness.
 */
static inline u32 jhash(const void *key, u32 length, u32 initval)
{
	u32 a, b, c;
	const u8 *k = key;

	/* Set up the internal state */
	a = b = c = JHASH_INITVAL + length + initval;

	/* All but the last block: affect some 32 bits of (a,b,c) */
	while (length > 12) {
		a += __get_unaligned_cpu32(k);
		b += __get_unaligned_cpu32(k + 4);
		c += __get_unaligned_cpu32(k + 8);
		__jhash_mix(a, b, c);
		length -= 12;
		k += 12;
	}
	/* Last block: affect all 32 bits of (c) */
	/* All the case statements fall through */
	switch (length) {
	case 12: c += (u32)k[11]<<24;
	case 11: c += (u32)k[10]<<16;
	case 10: c += (u32)k[9]<<8;
	case 9: c += k[8];
	case 8: b += (u32)k[7]<<24;
	case 7: b += (u32)k[6]<<16;
	case 6: b += (u32)k[5]<<8;
	case 5: b += k[4];
	case 4: a += (u32)k[3]<<24;
	case 3: a += (u32)k[2]<<16;
	case 2: a += (u32)k[1]<<8;
	case 1: a += k[0];
		__jhash_final(a, b, c);
	case 0: /* Nothing left to add */
		break;
	}

	return c;
}

/* jhash2 - hash an array of u32's
 * @k: the key which must be an array of u32's
 * @length: the number of u32's in the key
 * @initval: the previous hash, or an arbitrary value
 *
 * Returns the hash value of the key.
 */
static inline u32 jhash2(const u32 *k, u32 length, u32 initval)
{
	u32 a, b, c;

	/* Set up the internal state */
	a = b = c = JHASH_INITVAL + (length<<2) + initval;

	/* Handle most of the key */
	while (length > 3) {
		a += k[0];
		b += k[1];
		c += k[2];
		__jhash_mix(a, b, c);
		length -= 3;
		k += 3;
	}

	/* Handle the last 3 u32's: all the case statements fall through */
	switch (length) {
	case 3: c += k[2];
	case 2: b += k[1];
	case 1: a += k[0];
		__jhash_final(a, b, c);
	case 0: /* Nothing left to add */
		break;
	}

	return c;
}

/* __jhash_nwords - hash exactly 3, 2 or 1 word(s) */
static inline u32 __jhash_nwords(u32 a, u32 b, u32 c, u32 initval)
{
	a += initval;
	b += initval;
	c += initval;
	__jhash_final(a, b, c);
	return c;
}

static inline u32 jhash_3words(u32 a, u32 b, u32 c, u32 initval)
{
	return __jhash_nwords(a, b, c, initval + JHASH_INITVAL + (3 << 2));
}

static inline u32 jhash_2words(u32 a, u32 b, u32 initval)
{
	return __jhash_nwords(a, b, 0, initval + JHASH_INITVAL + (2 << 2));
}

static inline u32 jhash_1word(u32 a, u32 initval)
{
	return __jhash_nwords(a, 0, 0, initval + JHASH_INITVAL + (1 << 2));
}

#endif /* _LINUX_JHASH_H */
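
To show how this header is meant to be called, here is a rough, hypothetical example (not part of the patch). It assumes it is compiled with tools/include on the include path so that <linux/types.h> and <linux/jhash.h> resolve; the key values, initval and table size are made up:

#include <stdio.h>
#include <linux/types.h>	/* u32, from tools/include */
#include <linux/jhash.h>	/* the header added above */

int main(void)
{
	u32 key[4] = { 1, 2, 3, 4 };	/* made-up key material */

	/* jhash2() hashes an array of u32s; jhash_mask() folds the result
	 * into a power-of-two table size (jhash_size(10) == 1024 buckets). */
	u32 hash   = jhash2(key, 4, 17 /* arbitrary initval */);
	u32 bucket = hash & jhash_mask(10);

	printf("hash=0x%08x bucket=%u\n", hash, bucket);
	return 0;
}

The header itself is only pulled in here because, per the changelog above, the kernel lockdep code that liblockdep rebuilds includes and uses it.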

@@ -0,0 +1,46 @@
#ifndef _LINUX_UNALIGNED_PACKED_STRUCT_H
#define _LINUX_UNALIGNED_PACKED_STRUCT_H

#include <linux/kernel.h>

struct __una_u16 { u16 x; } __packed;
struct __una_u32 { u32 x; } __packed;
struct __una_u64 { u64 x; } __packed;

static inline u16 __get_unaligned_cpu16(const void *p)
{
	const struct __una_u16 *ptr = (const struct __una_u16 *)p;
	return ptr->x;
}

static inline u32 __get_unaligned_cpu32(const void *p)
{
	const struct __una_u32 *ptr = (const struct __una_u32 *)p;
	return ptr->x;
}

static inline u64 __get_unaligned_cpu64(const void *p)
{
	const struct __una_u64 *ptr = (const struct __una_u64 *)p;
	return ptr->x;
}

static inline void __put_unaligned_cpu16(u16 val, void *p)
{
	struct __una_u16 *ptr = (struct __una_u16 *)p;
	ptr->x = val;
}

static inline void __put_unaligned_cpu32(u32 val, void *p)
{
	struct __una_u32 *ptr = (struct __una_u32 *)p;
	ptr->x = val;
}

static inline void __put_unaligned_cpu64(u64 val, void *p)
{
	struct __una_u64 *ptr = (struct __una_u64 *)p;
	ptr->x = val;
}

#endif /* _LINUX_UNALIGNED_PACKED_STRUCT_H */
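
For illustration only (not part of the patch): the __packed one-member structs above are what make the compiler emit loads and stores that are safe at any alignment. A hypothetical standalone equivalent using plain C types:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

/* Same idea as __get_unaligned_cpu32(): a packed single-member struct
 * (GCC/Clang syntax here) tells the compiler the field may be unaligned. */
struct una_u32_demo { uint32_t x; } __attribute__((packed));

static uint32_t get_unaligned_u32_demo(const void *p)
{
	const struct una_u32_demo *ptr = (const struct una_u32_demo *)p;
	return ptr->x;
}

int main(void)
{
	unsigned char buf[8] = { 0x00, 0x78, 0x56, 0x34, 0x12, 0x00, 0x00, 0x00 };

	/* Read a 32-bit value that starts at the odd offset 1;
	 * prints 0x12345678 on a little-endian machine. */
	printf("0x%08" PRIx32 "\n", get_unaligned_u32_demo(buf + 1));
	return 0;
}

This is the same pattern jhash() above relies on to read its input without any alignment assumptions.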

@@ -1,3 +0,0 @@
/* empty file */

@@ -7,4 +7,6 @@
#define WRITE_ONCE(x, val) x=(val)
#define RCU_INIT_POINTER(p, v) p=(v)
#define __packed __attribute__((packed))
#endif

@@ -52,4 +52,6 @@ static inline int lockdep_warn(int condition)
#define _THIS_IP_ ({ __label__ __here; __here: (unsigned long)&&__here; })
#endif
#define pr_err(format, ...) fprintf (stderr, format, ## __VA_ARGS__)
#endif

@@ -18,4 +18,6 @@ static inline bool rcu_is_watching(void)
	return false;
}
#define rcu_assign_pointer(p, v) ((p) = (v))
#endif