jdk1.7 concurrenthashmap
构造函数
参数有总容量,segment负载因子,并发级别,因为segment数组初始化后无法扩容,因此负载因子是segment内部的负载因子,并发级别是segment数组的大小。实际并发级别=大于等于给定并发级别的最小2的n次方,segment的实际容量=给定总容量/实际并发级别向上取整,大于等于该值的最小2的N次方。初始化时只会初始化第一个segment元素。
给定默认值:总容量是16,负载因子0.75,并发级别16。实际默认值:总容量32,因为segment的容量最小是2,为了防止插入第一个元素就扩容
并发级别最大值是1<<16=65536,总容量最大值是1<<30
/**
 * JDK 7: creates a map with the given total capacity, per-segment load
 * factor and concurrency level.
 *
 * The segment array can never be resized, so concurrencyLevel fixes its
 * length (rounded up to a power of two, capped at MAX_SEGMENTS = 1<<16).
 * Each segment's table capacity is ceil(initialCapacity / ssize) rounded
 * up to a power of two, minimum MIN_SEGMENT_TABLE_CAPACITY (2). Only
 * segments[0] is created eagerly; the rest are built lazily by
 * ensureSegment().
 */
public ConcurrentHashMap(int initialCapacity, float loadFactor, int concurrencyLevel) {
    if (!(loadFactor > 0) || initialCapacity < 0 || concurrencyLevel <= 0)
        throw new IllegalArgumentException();
    if (concurrencyLevel > MAX_SEGMENTS)
        concurrencyLevel = MAX_SEGMENTS;
    // Find power-of-two sizes best matching arguments
    int sshift = 0;
    int ssize = 1;
    while (ssize < concurrencyLevel) {
        ++sshift;
        ssize <<= 1;
    }
    // segmentShift/segmentMask later extract the segment index from the
    // HIGH bits of a key's hash (see put/get).
    this.segmentShift = 32 - sshift;
    this.segmentMask = ssize - 1;
    if (initialCapacity > MAXIMUM_CAPACITY)
        initialCapacity = MAXIMUM_CAPACITY;
    // c = ceil(initialCapacity / ssize): entries each segment must hold.
    int c = initialCapacity / ssize;
    if (c * ssize < initialCapacity)
        ++c;
    // Round the per-segment capacity up to a power of two, at least 2
    // (so inserting the first element does not immediately resize).
    int cap = MIN_SEGMENT_TABLE_CAPACITY;
    while (cap < c)
        cap <<= 1;
    // create segments and segments[0]
    Segment<K,V> s0 =
        new Segment<K,V>(loadFactor, (int)(cap * loadFactor),
                         (HashEntry<K,V>[])new HashEntry[cap]);
    Segment<K,V>[] ss = (Segment<K,V>[])new Segment[ssize];
    UNSAFE.putOrderedObject(ss, SBASE, s0); // ordered write of segments[0]
    this.segments = ss;
}
put方法
主要逻辑分2步,首先根据hash值的高位确定segment下标的位置,再根据hash值的低位确定segment内部数组的索引,segment数组元素创建的线程安全由cas+自旋来保证,对segment内部数组的访问通过加独占锁来保证线程安全
/**
 * JDK 7 put: the high bits of the hash select the segment; the low bits
 * (inside Segment.put) select the bucket within the segment's table.
 * Null values are rejected up front.
 */
public V put(K key, V value) {
    Segment<K,V> s;
    if (value == null)
        throw new NullPointerException();
    int hash = hash(key);
    int j = (hash >>> segmentShift) & segmentMask;
    // The plain (non-volatile) read is only an optimization; ensureSegment
    // re-reads with volatile semantics and creates the segment via CAS.
    if ((s = (Segment<K,V>)UNSAFE.getObject // nonvolatile; recheck
         (segments, (j << SSHIFT) + SBASE)) == null) // in ensureSegment
        s = ensureSegment(j);
    return s.put(key, hash, value, false);
}
segment内部是由数组+链表实现的,数组元素是链表的头结点,hash值的低位相同的会采取头插法放到同一链表中。如果hashEntry个数超过临界值(数组容量*负载因子),会触发rehash扩容。扩容新数组容量是原来的2倍,下标i处的元素会被分成2份放到新数组i和i+n下标处(原数组容量为n)。
/**
 * Inserts (or, unless onlyIfAbsent, replaces) a mapping inside this
 * segment. All structural modification happens under the segment's
 * exclusive lock (Segment extends ReentrantLock).
 * Returns the previous value, or null if the key was absent.
 */
final V put(K key, int hash, V value, boolean onlyIfAbsent) {
    // Try the lock once; on failure spin in scanAndLockForPut, which may
    // also speculatively pre-create the node before acquiring the lock.
    HashEntry<K,V> node = tryLock() ? null :
        scanAndLockForPut(key, hash, value);
    V oldValue;
    try {
        HashEntry<K,V>[] tab = table;
        // Low bits of the hash pick the bucket within this segment.
        int index = (tab.length - 1) & hash;
        HashEntry<K,V> first = entryAt(tab, index);
        for (HashEntry<K,V> e = first;;) {
            // Two cases: key already present (replace value), or insert
            // a brand-new entry at the head of the bucket's list.
            if (e != null) {
                K k;
                // Same reference, or same hash plus equals() match.
                if ((k = e.key) == key ||
                    (e.hash == hash && key.equals(k))) {
                    oldValue = e.value;
                    if (!onlyIfAbsent) {
                        e.value = value;
                        ++modCount;
                    }
                    break;
                }
                e = e.next;
            }
            else {
                // Head insertion: link the new node in front of the
                // current head, then publish it as the bucket head.
                if (node != null)
                    node.setNext(first);
                else
                    node = new HashEntry<K,V>(hash, key, value, first);
                int c = count + 1;
                if (c > threshold && tab.length < MAXIMUM_CAPACITY)
                    // Over the threshold: grow the table (rehash also
                    // inserts `node` into the enlarged table).
                    rehash(node);
                else
                    setEntryAt(tab, index, node);
                ++modCount;
                count = c;
                oldValue = null;
                break;
            }
        }
    } finally {
        unlock();
    }
    return oldValue;
}
segment创建
下面是外部segment的创建,它会根据当前segment[0]作为原型来创建,注意segment[0]可能已经扩过容了。
创建segment对象后,通过cas比较交换来修改segment数组元素,此时只要一个线程修改成功即可。
/**
 * Lazily creates segment k, using segments[0] as the prototype for
 * table capacity and load factor (segments[0] may already have been
 * resized, so its CURRENT table length is copied). Publication uses
 * CAS so that exactly one of any racing threads installs the segment;
 * the others adopt the winner's instance.
 */
private Segment<K,V> ensureSegment(int k) {
    final Segment<K,V>[] ss = this.segments;
    long u = (k << SSHIFT) + SBASE; // raw offset
    Segment<K,V> seg;
    if ((seg = (Segment<K,V>)UNSAFE.getObjectVolatile(ss, u)) == null) {
        Segment<K,V> proto = ss[0]; // use segment 0 as prototype
        int cap = proto.table.length;
        float lf = proto.loadFactor;
        int threshold = (int)(cap * lf);
        HashEntry<K,V>[] tab = (HashEntry<K,V>[])new HashEntry[cap];
        if ((seg = (Segment<K,V>)UNSAFE.getObjectVolatile(ss, u))
            == null) { // recheck
            Segment<K,V> s = new Segment<K,V>(lf, threshold, tab);
            // Spin until some thread (possibly this one) has
            // successfully CASed a segment into the slot.
            while ((seg = (Segment<K,V>)UNSAFE.getObjectVolatile(ss, u))
                   == null) {
                if (UNSAFE.compareAndSwapObject(ss, u, null, seg = s))
                    break;
            }
        }
    }
    return seg;
}
加锁逻辑
segment继承了ReentrantLock,循环tryLock获取锁,如果重试次数大于门限值,才会lock阻塞当前线程进入等待队列,门限值跟处理器个数有关,个数为1,门限值等于1,大于1,门限值为64,与此同时顺便把新加node给初始化了
/**
 * Called when put() fails its initial tryLock. Spins on tryLock, and
 * only falls back to a blocking lock() after MAX_SCAN_RETRIES failures
 * (1 on uniprocessors, 64 otherwise). While spinning it walks the
 * bucket and may speculatively pre-create the node to insert; returns
 * that node, or null if none was created.
 */
private HashEntry<K,V> scanAndLockForPut(K key, int hash, V value) {
    HashEntry<K,V> first = entryForHash(this, hash);
    HashEntry<K,V> e = first;
    HashEntry<K,V> node = null;
    int retries = -1; // negative while locating node
    while (!tryLock()) {
        HashEntry<K,V> f; // to recheck first below
        if (retries < 0) {
            if (e == null) {
                // Key not found in the list: build the node now as an
                // optimization (put() may still not use it).
                if (node == null) // speculatively create node
                    node = new HashEntry<K,V>(hash, key, value, null);
                retries = 0;
            }
            else if (key.equals(e.key))
                retries = 0;
            else
                e = e.next;
        }
        else if (++retries > MAX_SCAN_RETRIES) {
            // Too many spins: block until the lock becomes available.
            lock();
            break;
        }
        else if ((retries & 1) == 0 &&
                 (f = entryForHash(this, hash)) != first) {
            e = first = f; // re-traverse if entry changed
            retries = -1;
        }
    }
    return node;
}
扩容
/**
 * Doubles this segment's table (caller holds the segment lock) and
 * finally inserts the pending node. An old bucket i splits into new
 * buckets i and i+oldCapacity, depending on the extra hash bit. The
 * "lastRun" optimization reuses the longest trailing run of a list
 * whose nodes all land in the same new bucket; only the nodes before
 * lastRun are cloned.
 */
private void rehash(HashEntry<K,V> node) {
    HashEntry<K,V>[] oldTable = table;
    int oldCapacity = oldTable.length;
    int newCapacity = oldCapacity << 1;
    threshold = (int)(newCapacity * loadFactor);
    HashEntry<K,V>[] newTable =
        (HashEntry<K,V>[]) new HashEntry[newCapacity];
    int sizeMask = newCapacity - 1;
    for (int i = 0; i < oldCapacity ; i++) {
        HashEntry<K,V> e = oldTable[i];
        if (e != null) {
            HashEntry<K,V> next = e.next;
            int idx = e.hash & sizeMask;
            if (next == null) // Single node on list
                newTable[idx] = e;
            else { // Reuse consecutive sequence at same slot
                // Find the longest trailing run that maps to a single
                // new index; that whole run moves without any copying.
                HashEntry<K,V> lastRun = e;
                int lastIdx = idx;
                for (HashEntry<K,V> last = next;
                     last != null;
                     last = last.next) {
                    int k = last.hash & sizeMask;
                    if (k != lastIdx) {
                        lastIdx = k;
                        lastRun = last;
                    }
                }
                newTable[lastIdx] = lastRun;
                // Clone remaining nodes
                // Nodes before lastRun are cloned one by one; the new
                // index depends on the extra (high) hash bit being 0 or 1.
                for (HashEntry<K,V> p = e; p != lastRun; p = p.next) {
                    V v = p.value;
                    int h = p.hash;
                    int k = h & sizeMask;
                    HashEntry<K,V> n = newTable[k];
                    newTable[k] = new HashEntry<K,V>(h, p.key, v, n);
                }
            }
        }
    }
    // After migration, link the triggering node into the enlarged
    // table and publish it (table is read by lock-free readers).
    int nodeIndex = node.hash & sizeMask; // add the new node
    node.setNext(newTable[nodeIndex]);
    newTable[nodeIndex] = node;
    table = newTable;
}
get方法
和put方法比,get简直不要太简单。首先根据hash值的高位得到segment元素索引,再根据hash值的低位获取到内部hashEntry表的元素索引,也即链表头结点,最后遍历链表得到key相同的节点value值
get方法没有加锁操作,通过unsafe#getObjectVolatile来保证数组元素的可见性
/**
 * JDK 7 get: lock-free. High hash bits select the segment, low bits
 * the bucket; both array reads use UNSAFE.getObjectVolatile for
 * visibility, then the bucket's list is traversed.
 */
public V get(Object key) {
    Segment<K,V> s; // manually integrate access methods to reduce overhead
    HashEntry<K,V>[] tab;
    int h = hash(key);
    long u = (((h >>> segmentShift) & segmentMask) << SSHIFT) + SBASE;
    if ((s = (Segment<K,V>)UNSAFE.getObjectVolatile(segments, u)) != null &&
        (tab = s.table) != null) {
        // Volatile read of the bucket head, then plain list traversal
        // (HashEntry.next is volatile).
        for (HashEntry<K,V> e = (HashEntry<K,V>) UNSAFE.getObjectVolatile
                 (tab, ((long)(((tab.length - 1) & h)) << TSHIFT) + TBASE);
             e != null; e = e.next) {
            K k;
            if ((k = e.key) == key || (e.hash == h && key.equals(k)))
                return e.value;
        }
    }
    return null;
}
remove方法
也是先获取到Segment,再从Segment中删除
/**
 * JDK 7 remove: locate the segment for this key's hash, then delegate
 * the actual removal to it.
 */
public V remove(Object key) {
    int h = hash(key);
    Segment<K,V> seg = segmentForHash(h);
    if (seg == null)
        return null;                 // segment never created -> key absent
    return seg.remove(key, h, null); // null value = remove unconditionally
}
和put一样,会对segment加锁。遍历链表时,保留当前节点的前置节点,如果找到key相等的节点,通过设置前置节点的next为当前节点的next来删除当前节点
/**
 * Removes the entry for key (and, when value != null, only if the
 * value also matches) under the segment lock. pred tracks the previous
 * node; unlinking either rewrites the bucket head (setEntryAt) or
 * pred.next — a volatile field, which keeps lock-free readers safe.
 * Returns the removed value, or null if nothing was removed.
 */
final V remove(Object key, int hash, Object value) {
    if (!tryLock())
        scanAndLock(key, hash);
    V oldValue = null;
    try {
        HashEntry<K,V>[] tab = table;
        int index = (tab.length - 1) & hash;
        HashEntry<K,V> e = entryAt(tab, index);
        HashEntry<K,V> pred = null;
        while (e != null) {
            K k;
            HashEntry<K,V> next = e.next;
            if ((k = e.key) == key ||
                (e.hash == hash && key.equals(k))) {
                V v = e.value;
                // value == null means unconditional remove (plain
                // remove(key)); otherwise remove only on a value match.
                if (value == null || value == v || value.equals(v)) {
                    if (pred == null)
                        setEntryAt(tab, index, next);
                    else
                        pred.setNext(next);
                    ++modCount;
                    --count;
                    oldValue = v;
                }
                break;
            }
            pred = e;
            e = next;
        }
    } finally {
        unlock();
    }
    return oldValue;
}
如何保证线程安全
put和remove之间根据ReentrantLock独占锁
put和get之间,put在修改hashEntry元素引用时使用unsafe#putOrderedObject,get使用unsafe#getObjectVolatile,rehash时先创建新数组,最后在一步赋值到table数组,table是通过volatile来保证可见性的
remove和get,如果删除的是头结点,remove通过unsafe来操作数组内部元素;如果不是头结点,它通过设置前置节点的next为待删节点的next来删除,这里的并发性保证是next是volatile的
jdk1.8 concurrenthashmap
主要逻辑
它的无参构造函数不做任何处理,数组的初始化放到第一次put进行,没有了分段锁,采用数组+链表+红黑树,数组默认大小16。锁的粒度更小了,在每个数组元素上。链表采用尾插法(原因后面再说),链表转红黑树的条件是链表长度>=8且数组容量>=64。
线程安全通过cas+synchronized保证
构造函数
//默认构造函数啥也不做,sizectl=0
// JDK 8 no-arg constructor: does nothing. sizeCtl stays 0 and the
// table is allocated lazily on the first insertion (see initTable).
public ConcurrentHashMap() {
}
//指定初始容量,sizectl=实际容量,最大是1<<30
/**
 * JDK 8: records the computed capacity in sizeCtl; the table itself is
 * allocated lazily. Capacity = tableSizeFor(1.5 * initialCapacity + 1),
 * capped at MAXIMUM_CAPACITY (1 << 30).
 */
public ConcurrentHashMap(int initialCapacity) {
    if (initialCapacity < 0)
        throw new IllegalArgumentException();
    int cap;
    if (initialCapacity >= (MAXIMUM_CAPACITY >>> 1)) {
        cap = MAXIMUM_CAPACITY;
    } else {
        cap = tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1);
    }
    this.sizeCtl = cap;
}
//根据map初始化,会根据map的大小计算出实际容量,并和默认容量16取较大值,应该是为了避免反复扩容
// Builds the map from another map. sizeCtl starts at DEFAULT_CAPACITY
// (16) so small source maps don't produce a tiny table; putAll then
// pre-sizes based on m.size(), avoiding repeated resizing.
public ConcurrentHashMap(Map<? extends K, ? extends V> m) {
    this.sizeCtl = DEFAULT_CAPACITY;
    putAll(m);
}
/**
 * Copies all mappings from m: pre-size the table once for the whole
 * batch, then insert each mapping individually.
 */
public void putAll(Map<? extends K, ? extends V> m) {
    tryPresize(m.size());
    for (Map.Entry<? extends K, ? extends V> entry : m.entrySet()) {
        putVal(entry.getKey(), entry.getValue(), false);
    }
}
/**
 * (Abridged excerpt.) Grows/initializes the table to hold `size`
 * mappings. Shown here is only the initialization branch, which is
 * claimed by CASing sizeCtl to -1 (same protocol as initTable); the
 * resize branch is elided — see the later excerpt of this method in
 * the resize section.
 */
private final void tryPresize(int size) {
    int c = (size >= (MAXIMUM_CAPACITY >>> 1)) ? MAXIMUM_CAPACITY :
        tableSizeFor(size + (size >>> 1) + 1);
    int sc;
    while ((sc = sizeCtl) >= 0) {
        Node<K,V>[] tab = table; int n;
        if (tab == null || (n = tab.length) == 0) {
            // Table not yet created: initialize it here.
            n = (sc > c) ? sc : c;
            if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) {
                try {
                    if (table == tab) {
                        @SuppressWarnings("unchecked")
                        Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n];
                        table = nt;
                        sc = n - (n >>> 2); // next threshold = 0.75 * n
                    }
                } finally {
                    sizeCtl = sc;
                }
            }
        }
        // ... (resize branch for an existing table — elided here)
    }
}
//并发级别默认是1
/**
 * JDK 8: there are no segments, so concurrencyLevel no longer sizes a
 * segment array; it only acts as a lower bound on the initial table
 * size. The computed capacity is stored in sizeCtl, capped at
 * MAXIMUM_CAPACITY.
 */
public ConcurrentHashMap(int initialCapacity,
                         float loadFactor, int concurrencyLevel) {
    if (!(loadFactor > 0.0f) || initialCapacity < 0 || concurrencyLevel <= 0)
        throw new IllegalArgumentException();
    if (initialCapacity < concurrencyLevel)   // Use at least as many bins
        initialCapacity = concurrencyLevel;   // as estimated threads
    long size = (long)(1.0 + (long)initialCapacity / loadFactor);
    int cap;
    if (size >= (long)MAXIMUM_CAPACITY) {
        cap = MAXIMUM_CAPACITY;
    } else {
        cap = tableSizeFor((int)size);
    }
    this.sizeCtl = cap;
}
put方法
主要逻辑是,如果table未初始化,则先初始化table,再根据hash值的低位得到数组索引,如果为空,直接cas替换,否则执行链表或红黑树插入。
插入需要加锁,链表首先遍历判断key是否重复,如果重复且onlyIfAbsent为false,替换之,否则采用尾插法插入。通过头结点的hash值是否大于0判断是链表还是红黑树。
链表插入还会计算链表长度,如果>=8,需进一步处理。
/**
 * JDK 8 putVal. Outline:
 *  - lazily initialize the table on first use;
 *  - empty bin: install the new node with a single CAS, no locking;
 *  - bin head hash == MOVED (-1): a resize is in progress, help it;
 *  - otherwise lock the bin head (synchronized) and either replace an
 *    existing key, append to the list tail, or insert into the tree.
 * Afterwards a bin that reached TREEIFY_THRESHOLD (8) nodes goes to
 * treeifyBin, and addCount updates the size (and may start a resize).
 */
final V putVal(K key, V value, boolean onlyIfAbsent) {
    if (key == null || value == null) throw new NullPointerException();
    int hash = spread(key.hashCode());
    // Nodes seen in the bin; 0 means the empty-bin CAS path was taken.
    int binCount = 0;
    for (Node<K,V>[] tab = table;;) {
        Node<K,V> f; int n, i, fh;
        if (tab == null || (n = tab.length) == 0)
            // First insertion: create the table.
            tab = initTable();
        else if ((f = tabAt(tab, i = (n - 1) & hash)) == null) {
            if (casTabAt(tab, i, null,
                         new Node<K,V>(hash, key, value, null)))
                break; // no lock when adding to empty bin
        }
        else if ((fh = f.hash) == MOVED)
            // MOVED (-1): this bin has already been migrated during a
            // resize; help finish the transfer, then retry on the new table.
            tab = helpTransfer(tab, f);
        else {
            V oldVal = null;
            synchronized (f) {
                if (tabAt(tab, i) == f) {
                    if (fh >= 0) {
                        // Plain linked list: search for the key, else
                        // tail-insert a new node.
                        binCount = 1;
                        for (Node<K,V> e = f;; ++binCount) {
                            K ek;
                            if (e.hash == hash &&
                                ((ek = e.key) == key ||
                                 (ek != null && key.equals(ek)))) {
                                oldVal = e.val;
                                if (!onlyIfAbsent)
                                    e.val = value;
                                break;
                            }
                            Node<K,V> pred = e;
                            if ((e = e.next) == null) {
                                pred.next = new Node<K,V>(hash, key,
                                                          value, null);
                                break;
                            }
                        }
                    }
                    // Red-black tree bin (a TreeBin head also has a
                    // negative hash, -2).
                    else if (f instanceof TreeBin) {
                        Node<K,V> p;
                        binCount = 2;
                        if ((p = ((TreeBin<K,V>)f).putTreeVal(hash, key,
                                                              value)) != null) {
                            oldVal = p.val;
                            if (!onlyIfAbsent)
                                p.val = value;
                        }
                    }
                }
            }
            if (binCount != 0) {
                if (binCount >= TREEIFY_THRESHOLD)
                    treeifyBin(tab, i);
                if (oldVal != null)
                    return oldVal;
                break;
            }
        }
    }
    // Bump the element count; may trigger/help a resize (not shown here).
    addCount(1L, binCount);
    return null;
}
初始化hash表
用到了sizectl,它有2个用途:
1、如果构造函数指定了初始容量,它就是实际容量
2、通过对它的cas操作来保证table数组的线程安全
/**
 * Creates the table on first use. sizeCtl doubles as the coordination
 * flag: a thread CASes it to -1 to claim initialization; others that
 * observe a negative value yield and re-check. A positive sizeCtl set
 * by a constructor is used as the capacity, else DEFAULT_CAPACITY (16).
 * Afterwards sizeCtl holds the resize threshold, n - n/4 = 0.75n.
 */
private final Node<K,V>[] initTable() {
    Node<K,V>[] tab; int sc;
    while ((tab = table) == null || tab.length == 0) {
        if ((sc = sizeCtl) < 0)
            // Another thread has claimed initialization: give up the
            // CPU and retry.
            Thread.yield(); // lost initialization race; just spin
        // Claim initialization by CASing sizeCtl to -1.
        else if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) {
            try {
                if ((tab = table) == null || tab.length == 0) {
                    int n = (sc > 0) ? sc : DEFAULT_CAPACITY;
                    @SuppressWarnings("unchecked")
                    Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n];
                    table = tab = nt;
                    // n - n/4 = 0.75n: the resize threshold.
                    sc = n - (n >>> 2);
                }
            } finally {
                // Publish the threshold (consulted by addCount when
                // deciding whether to resize — not shown in this article).
                sizeCtl = sc;
            }
            break;
        }
    }
    return tab;
}
红黑树的插入
分2步,BST的插入,如果插入节点的父节点是红色,还需要进一步调整使之满足红黑树的规则
/**
 * Inserts into this bin's red-black tree. Standard BST descent first:
 * ordering uses the hash, then Comparable.compareTo when the key class
 * implements it, and finally tieBreakOrder (class name plus
 * System.identityHashCode) as a total-order tie breaker. Returns the
 * existing node when the key is already present (the caller decides
 * whether to overwrite its value), or null after inserting a new node.
 * If the insertion breaks red-black invariants (red parent), the tree
 * is rebalanced under the tree's root lock.
 */
final TreeNode<K,V> putTreeVal(int h, K k, V v) {
    Class<?> kc = null;
    boolean searched = false;
    for (TreeNode<K,V> p = root;;) {
        int dir, ph; K pk;
        if (p == null) {
            // Empty tree: new node becomes both root and list head.
            first = root = new TreeNode<K,V>(h, k, v, null, null);
            break;
        }
        else if ((ph = p.hash) > h)
            dir = -1;
        else if (ph < h)
            dir = 1;
        else if ((pk = p.key) == k || (pk != null && k.equals(pk)))
            return p;
        else if ((kc == null &&
                  (kc = comparableClassFor(k)) == null) ||
                 (dir = compareComparables(kc, k, pk)) == 0) {
            // Equal hashes and keys not mutually comparable: scan both
            // subtrees once to rule out a duplicate key.
            if (!searched) {
                TreeNode<K,V> q, ch;
                searched = true;
                if (((ch = p.left) != null &&
                     (q = ch.findTreeNode(h, k, kc)) != null) ||
                    ((ch = p.right) != null &&
                     (q = ch.findTreeNode(h, k, kc)) != null))
                    return q;
            }
            // Last resort: tieBreakOrder orders by class name and
            // System.identityHashCode (which ignores any hashCode
            // override; null maps to 0), giving a deterministic order.
            dir = tieBreakOrder(k, pk);
        }
        TreeNode<K,V> xp = p;
        if ((p = (dir <= 0) ? p.left : p.right) == null) {
            // Reached the leaf position: link the node into both the
            // tree and the traversal list (first/prev/next).
            TreeNode<K,V> x, f = first;
            first = x = new TreeNode<K,V>(h, k, v, f, xp);
            if (f != null)
                f.prev = x;
            if (dir <= 0)
                xp.left = x;
            else
                xp.right = x;
            if (!xp.red)
                x.red = true;
            else {
                // Red parent: rebalance under the root lock.
                lockRoot();
                try {
                    root = balanceInsertion(root, x);
                } finally {
                    unlockRoot();
                }
            }
            break;
        }
    }
    assert checkInvariants(root);
    return null;
}
链表转红黑树
链表转红黑树的条件是链表长度大于等于8,且数组长度大于等于64,如果小于64,会扩容数组而不是转红黑树
/**
 * Converts the list in bin `index` to a red-black tree — but only when
 * the table already has MIN_TREEIFY_CAPACITY (64) bins; below that the
 * table is doubled instead (which also shortens the lists).
 */
private final void treeifyBin(Node<K,V>[] tab, int index) {
    Node<K,V> b; int n, sc;
    if (tab != null) {
        if ((n = tab.length) < MIN_TREEIFY_CAPACITY)
            // Small table: resize instead of treeifying. No bin lock
            // here; the resize coordinates via CAS on sizeCtl.
            tryPresize(n << 1);
        else if ((b = tabAt(tab, index)) != null && b.hash >= 0) {
            synchronized (b) {
                if (tabAt(tab, index) == b) {
                    TreeNode<K,V> hd = null, tl = null;
                    for (Node<K,V> e = b; e != null; e = e.next) {
                        // First build a doubly-linked TreeNode list
                        // mirroring the bin; TreeBin then builds the
                        // actual tree from that list.
                        TreeNode<K,V> p =
                            new TreeNode<K,V>(e.hash, e.key, e.val,
                                              null, null);
                        if ((p.prev = tl) == null)
                            hd = p;
                        else
                            tl.next = p;
                        tl = p;
                    }
                    // The bin head becomes a TreeBin (its hash is -2).
                    setTabAt(tab, index, new TreeBin<K,V>(hd));
                }
            }
        }
    }
}
扩容
扩容时sizectl的作用:高16位是扩容标志位,是个负数,并且和原容量有关;低16位表示正在扩容的线程数+1(因为初始化是2,-1是用来表示正在初始化)
sizectl作用:用来控制初始化和扩容,如果是负数,表示正在初始化或扩容:-1表示正在初始化;扩容时为(扩容戳<<16)+扩容线程数+1(即上一行所述的高低16位编码)。初始化之前是容量大小,0表示默认容量,初始化之后是临界值=容量*0.75(该临界值在本文未展示的addCount相关代码中使用)
/**
 * (Abridged excerpt — resize branch only.) With the table already
 * created, compute the resize stamp for the current capacity n. If a
 * resize is running (sc < 0), try to join it by CAS-incrementing the
 * worker count in sizeCtl's low bits; otherwise start one by CASing
 * sizeCtl to (rs << RESIZE_STAMP_SHIFT) + 2.
 */
private final void tryPresize(int size) {
    ...
    else if (tab == table) {
        int rs = resizeStamp(n);
        if (sc < 0) {
            Node<K,V>[] nt;
            // Bail out if the resize has finished, is saturated with
            // workers, or has not actually handed out work yet.
            if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 ||
                sc == rs + MAX_RESIZERS || (nt = nextTable) == null ||
                transferIndex <= 0)
                break;
            if (U.compareAndSwapInt(this, SIZECTL, sc, sc + 1))
                transfer(tab, nt);
        }
        else if (U.compareAndSwapInt(this, SIZECTL, sc,
                                     (rs << RESIZE_STAMP_SHIFT) + 2))
            transfer(tab, null);
    }
迁移
hash表的迁移会将数组分成若干个相同步长的小任务来迁移,这样做的目的是在多线程环境中,每个线程会领取未被领取的小任务,并发处理不同小任务,提高吞吐量,迁移的顺序是从后往前。
每个线程都会尽量去做所有任务,通过transferIndex来表示任务分配的进度,对transferIndex的操作是cas的
迁移都是将链表或红黑树一分为二,放到i或i+n处。红黑树迁移后还会判断如果节点数<=6,会将红黑树转回链表
transfer除了扩容方法tryPresize会调,putVal也会调,当判断到数组节点hash值=-1时,说明此时正在扩容,会调helpTransfer方法帮助扩容
迁移会逐渐把旧hash表中所有数组元素(包括为null的)替换成fwd对象,此对象的hash值是-1,表示正在扩容
/**
 * Migrates all bins from tab into nextTab (twice the size). The index
 * range [0, n) is split into stride-sized chunks, claimed from the
 * tail via CAS on transferIndex, so multiple threads can migrate
 * concurrently. Every migrated (or empty) old bin is replaced with a
 * ForwardingNode (hash == MOVED == -1) so readers/writers know to look
 * at — or help with — the new table. A bin splits into new bins i and
 * i+n depending on the extra hash bit, like the JDK 7 rehash.
 */
private final void transfer(Node<K,V>[] tab, Node<K,V>[] nextTab) {
    int n = tab.length, stride;
    // Stride (chunk size): n on a single core, else (n/8)/NCPU, min 16.
    if ((stride = (NCPU > 1) ? (n >>> 3) / NCPU : n) < MIN_TRANSFER_STRIDE)
        stride = MIN_TRANSFER_STRIDE; // subdivide range
    if (nextTab == null) { // initiating
        // Callers guarantee only the first resizer passes nextTab == null.
        try {
            @SuppressWarnings("unchecked")
            Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n << 1];
            nextTab = nt;
        } catch (Throwable ex) { // try to cope with OOME
            sizeCtl = Integer.MAX_VALUE;
            return;
        }
        nextTable = nextTab;
        transferIndex = n;
    }
    int nextn = nextTab.length;
    // Installed in each finished old bin; its hash is MOVED (-1),
    // signalling "resize in progress".
    ForwardingNode<K,V> fwd = new ForwardingNode<K,V>(nextTab);
    boolean advance = true;
    boolean finishing = false; // to ensure sweep before committing nextTab
    // The chunk currently owned by this thread covers indices bound..i.
    for (int i = 0, bound = 0;;) {
        Node<K,V> f; int fh;
        // This loop claims the next index (possibly a whole new chunk).
        while (advance) {
            int nextIndex, nextBound;
            // Work proceeds from high indices down to low.
            if (--i >= bound || finishing)
                advance = false;
            else if ((nextIndex = transferIndex) <= 0) {
                i = -1;
                advance = false;
            }
            // Indices >= transferIndex are already claimed; CAS it down
            // by one stride to claim the next chunk.
            else if (U.compareAndSwapInt
                     (this, TRANSFERINDEX, nextIndex,
                      nextBound = (nextIndex > stride ?
                                   nextIndex - stride : 0))) {
                bound = nextBound;
                i = nextIndex - 1;
                advance = false;
            }
        }
        if (i < 0 || i >= n || i + n >= nextn) {
            int sc;
            // finishing: whole migration done — publish the new table
            // and set sizeCtl to the new threshold, 2n - n/2 = 1.5n
            // (0.75 of the doubled capacity).
            if (finishing) {
                nextTable = null;
                table = nextTab;
                sizeCtl = (n << 1) - (n >>> 1);
                return;
            }
            // This thread has no more chunks: decrement the worker
            // count held in sizeCtl's low bits. If we were not the last
            // worker just return; the last one (low bits back to 2)
            // re-sweeps the whole table before committing.
            if (U.compareAndSwapInt(this, SIZECTL, sc = sizeCtl, sc - 1)) {
                if ((sc - 2) != resizeStamp(n) << RESIZE_STAMP_SHIFT)
                    return;
                finishing = advance = true;
                i = n; // recheck before commit
            }
        }
        // Empty old bin: just CAS in the forwarding node.
        else if ((f = tabAt(tab, i)) == null)
            advance = casTabAt(tab, i, null, fwd);
        else if ((fh = f.hash) == MOVED)
            advance = true; // already processed
        else {
            // Migrate one bin; locking is at bin (array element)
            // granularity.
            synchronized (f) {
                if (tabAt(tab, i) == f) {
                    Node<K,V> ln, hn;
                    if (fh >= 0) {
                        // Linked-list bin: same lastRun trick as the
                        // JDK 7 rehash — reuse the longest tail whose
                        // nodes all go to one new bin, clone the rest.
                        int runBit = fh & n;
                        Node<K,V> lastRun = f;
                        for (Node<K,V> p = f.next; p != null; p = p.next) {
                            int b = p.hash & n;
                            if (b != runBit) {
                                runBit = b;
                                lastRun = p;
                            }
                        }
                        if (runBit == 0) {
                            ln = lastRun;
                            hn = null;
                        }
                        else {
                            hn = lastRun;
                            ln = null;
                        }
                        for (Node<K,V> p = f; p != lastRun; p = p.next) {
                            int ph = p.hash; K pk = p.key; V pv = p.val;
                            if ((ph & n) == 0)
                                ln = new Node<K,V>(ph, pk, pv, ln);
                            else
                                hn = new Node<K,V>(ph, pk, pv, hn);
                        }
                        setTabAt(nextTab, i, ln);
                        setTabAt(nextTab, i + n, hn);
                        // Mark the old bin as migrated.
                        setTabAt(tab, i, fwd);
                        advance = true;
                    }
                    // Tree bin: traverse in node-insertion order (the
                    // TreeNode list), splitting into low/high lists.
                    else if (f instanceof TreeBin) {
                        TreeBin<K,V> t = (TreeBin<K,V>)f;
                        TreeNode<K,V> lo = null, loTail = null;
                        TreeNode<K,V> hi = null, hiTail = null;
                        int lc = 0, hc = 0;
                        for (Node<K,V> e = t.first; e != null; e = e.next) {
                            int h = e.hash;
                            TreeNode<K,V> p = new TreeNode<K,V>
                                (h, e.key, e.val, null, null);
                            if ((h & n) == 0) {
                                if ((p.prev = loTail) == null)
                                    lo = p;
                                else
                                    loTail.next = p;
                                loTail = p;
                                ++lc;
                            }
                            else {
                                if ((p.prev = hiTail) == null)
                                    hi = p;
                                else
                                    hiTail.next = p;
                                hiTail = p;
                                ++hc;
                            }
                        }
                        // Too few nodes (<= UNTREEIFY_THRESHOLD, 6):
                        // turn that half back into a plain list.
                        ln = (lc <= UNTREEIFY_THRESHOLD) ? untreeify(lo) :
                            (hc != 0) ? new TreeBin<K,V>(lo) : t;
                        hn = (hc <= UNTREEIFY_THRESHOLD) ? untreeify(hi) :
                            (lc != 0) ? new TreeBin<K,V>(hi) : t;
                        setTabAt(nextTab, i, ln);
                        setTabAt(nextTab, i + n, hn);
                        setTabAt(tab, i, fwd);
                        advance = true;
                    }
                }
            }
        }
    }
}
get方法
get方法主要逻辑如下:
根据key的hashCode得到hash值,再(n-1)&hash得到hash表索引,如果为null直接返回null,如果头结点符合也直接返回,否则判断如果hash值小于0,说明正在扩容或者是红黑树,如果大于0说明是链表,直接遍历查找即可。
/**
 * JDK 8 get: lock-free. (n-1) & spread(hashCode) locates the bin. The
 * head node is checked first; a negative head hash delegates to the
 * node's own find() — ForwardingNode.find during a resize,
 * TreeBin.find for a tree bin — otherwise the list is scanned.
 */
public V get(Object key) {
    Node<K,V>[] tab; Node<K,V> e, p; int n, eh; K ek;
    int h = spread(key.hashCode());
    if ((tab = table) != null && (n = tab.length) > 0 &&
        (e = tabAt(tab, (n - 1) & h)) != null) {
        if ((eh = e.hash) == h) {
            if ((ek = e.key) == key || (ek != null && key.equals(ek)))
                return e.val;
        }
        else if (eh < 0)
            // Resizing -> ForwardingNode.find; tree bin -> TreeBin.find.
            return (p = e.find(h, key)) != null ? p.val : null;
        while ((e = e.next) != null) {
            if (e.hash == h &&
                ((ek = e.key) == key || (ek != null && key.equals(ek))))
                return e.val;
        }
    }
    return null;
}
扩容查找
ForwardingNode里面存了扩容后的hash表的引用nextTable,扩容查找就是在这张表里查。
注意这里用到了java中的标签,continue+标签可以在多重循环中跳到指定循环的下一轮,标签是标识符+冒号
/**
 * ForwardingNode.find: looks the key up in the nextTable this node
 * points to. The labeled continue restarts the outer loop when the
 * lookup runs into yet another ForwardingNode (the new table is itself
 * being resized), avoiding unbounded recursion.
 */
Node<K,V> find(int h, Object k) {
    // loop to avoid arbitrarily deep recursion on forwarding nodes
    outer: for (Node<K,V>[] tab = nextTable;;) {
        Node<K,V> e; int n;
        if (k == null || tab == null || (n = tab.length) == 0 ||
            (e = tabAt(tab, (n - 1) & h)) == null)
            return null;
        for (;;) {
            int eh; K ek;
            if ((eh = e.hash) == h &&
                ((ek = e.key) == k || (ek != null && k.equals(ek))))
                return e;
            if (eh < 0) {
                if (e instanceof ForwardingNode) {
                    // The table being resized into is itself resizing:
                    // follow its forwarding pointer and restart.
                    tab = ((ForwardingNode<K,V>)e).nextTable;
                    continue outer;
                }
                else
                    return e.find(h, k);
            }
            if ((e = e.next) == null)
                return null;
        }
    }
}
**粗体** _斜体_ [链接](http://example.com) `代码` - 列表 > 引用
。你还可以使用@
来通知其他用户。