数据结构
JDK1.8ConcurrentHashMap采用数组+单链表+红黑树的数据结构,数组和链表存储的是一个个Node对象,红黑树存储的是TreeNode对象
// Basic hash-bin node (abridged excerpt). Each table bin holds a singly
// linked list of these; treeified bins use TreeNode instead.
static class Node<K,V> implements Map.Entry<K,V> {
// hash and key are immutable once the node is published.
final int hash;
final K key;
// val and next are volatile so readers (get) can traverse bins
// without locking and still observe up-to-date values.
volatile V val;
volatile Node<K,V> next;
}
// Node subclass stored inside treeified bins. Besides the red-black
// tree links it keeps prev (and inherits next), so the bin can still
// be traversed and unlinked as a linked list.
static final class TreeNode<K,V> extends Node<K,V> {
TreeNode<K,V> parent; // red-black tree links
TreeNode<K,V> left;
TreeNode<K,V> right;
TreeNode<K,V> prev; // needed to unlink next upon deletion
boolean red; // node colour used for red-black rebalancing
TreeNode(int hash, K key, V val, Node<K,V> next,
TreeNode<K,V> parent) {
// hash/key/val/next are stored by the Node superclass.
super(hash, key, val, next);
this.parent = parent;
}
}
常用方法
使用
源码分析
主要属性
// Largest possible table capacity (must be a power of two <= 1<<30).
static final int MAXIMUM_CAPACITY = 1 << 30;
// Default initial table capacity. NOTE(review): in ConcurrentHashMap the
// JDK names this DEFAULT_CAPACITY (DEFAULT_INITIAL_CAPACITY is HashMap's
// constant); renamed to match the references in the Map constructor and
// initTable() below.
static final int DEFAULT_CAPACITY = 16;
// Largest possible array size (used by toArray and related methods).
static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8;
// Load factor: a resize is triggered when size exceeds capacity * 0.75.
static final float DEFAULT_LOAD_FACTOR = 0.75f;
// Treeify threshold: a bin's linked list is converted to a red-black
// tree once it holds more than this many nodes (and the table is large
// enough, see MIN_TREEIFY_CAPACITY).
static final int TREEIFY_THRESHOLD = 8;
// Untreeify threshold: when bins are split during a resize, a tree bin
// that drops below this many nodes is converted back to a linked list.
static final int UNTREEIFY_THRESHOLD = 6;
// Minimum table capacity for treeification: if the table is smaller
// than this, an overlong bin triggers a resize instead of treeifying.
static final int MIN_TREEIFY_CAPACITY = 64;
构造方法
// Creates an empty map with the default initial table capacity (16);
// the table itself is allocated lazily on first insertion.
public ConcurrentHashMap() {
}
// Creates an empty map sized to accommodate initialCapacity elements:
// sizeCtl is set to the next power of two >= 1.5 * initialCapacity + 1
// (capped at MAXIMUM_CAPACITY). The table is allocated in initTable().
public ConcurrentHashMap(int initialCapacity) {
if (initialCapacity < 0)
throw new IllegalArgumentException();
int cap = ((initialCapacity >= (MAXIMUM_CAPACITY >>> 1)) ?
MAXIMUM_CAPACITY :
tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1));
this.sizeCtl = cap;
}
// Creates a map with the same mappings as m, starting from the default
// capacity; putAll grows the table further as needed.
public ConcurrentHashMap(Map<? extends K, ? extends V> m) {
this.sizeCtl = DEFAULT_CAPACITY;
putAll(m);
}
// Delegates to the three-argument constructor with concurrencyLevel = 1
// (in JDK 1.8 the level only influences the initial sizing below).
public ConcurrentHashMap(int initialCapacity, float loadFactor) {
this(initialCapacity, loadFactor, 1);
}
// Creates an empty map sized from the estimated element count and load
// factor. concurrencyLevel (the estimated number of writer threads) is
// only used to enforce a minimum capacity here -- unlike JDK 1.7 it no
// longer creates Segments.
public ConcurrentHashMap(int initialCapacity,
float loadFactor, int concurrencyLevel) {
if (!(loadFactor > 0.0f) || initialCapacity < 0 || concurrencyLevel <= 0)
throw new IllegalArgumentException();
if (initialCapacity < concurrencyLevel) // Use at least as many bins
initialCapacity = concurrencyLevel; // as estimated threads
long size = (long)(1.0 + (long)initialCapacity / loadFactor);
int cap = (size >= (long)MAXIMUM_CAPACITY) ?
MAXIMUM_CAPACITY : tableSizeFor((int)size);
this.sizeCtl = cap;
}
put()方法
// Maps key to value, replacing any previous mapping. Neither key nor
// value may be null. Delegates to putVal with onlyIfAbsent = false.
public V put(K key, V value) {
return putVal(key, value, false);
}
// Implementation of put / putIfAbsent. Spins until the insert succeeds:
// CAS into an empty bin, otherwise lock the bin head with synchronized.
// Returns the previous value for the key, or null if there was none.
final V putVal(K key, V value, boolean onlyIfAbsent) {
if (key == null || value == null) throw new NullPointerException();
// Spread the high bits of the hash downward to reduce bin collisions.
int hash = spread(key.hashCode());
int binCount = 0; // nodes seen in the bin; 0 = empty-bin CAS path
// Retry loop: every failed step falls through and tries again.
for (Node<K,V>[] tab = table;;) {
Node<K,V> f; int n, i, fh;
// 1. Lazily initialize the table on first insertion.
if (tab == null || (n = tab.length) == 0)
tab = initTable();
// 2. Compute the bin index for this hash and read the bin head.
else if ((f = tabAt(tab, i = (n - 1) & hash)) == null) {
// 2.1 Bin is empty: no hash collision, so try to CAS the new
// node in; on CAS failure just loop and retry (spin).
if (casTabAt(tab, i, null,
new Node<K,V>(hash, key, value, null)))
break; // no lock when adding to empty bin
}
// 3. Bin head hash == MOVED (-1): a resize is in progress, so
// help transfer bins to the new table before retrying.
else if ((fh = f.hash) == MOVED)
tab = helpTransfer(tab, f);
else {
// 4. Hash collision: lock the head of the bin (list head or
// TreeBin) with synchronized and insert under the lock.
V oldVal = null;
synchronized (f) {
if (tabAt(tab, i) == f) {
// 4.1 Bin is a linked list (head hash >= 0).
if (fh >= 0) {
binCount = 1;
// Walk the list looking for an equal key.
for (Node<K,V> e = f;; ++binCount) {
K ek;
// Key already present: replace the value
// (unless onlyIfAbsent) and stop.
if (e.hash == hash &&
((ek = e.key) == key ||
(ek != null && key.equals(ek)))) {
oldVal = e.val;
if (!onlyIfAbsent)
e.val = value;
break;
}
Node<K,V> pred = e;
// Key not present: append a new node at the
// tail of the list (tail insertion).
if ((e = e.next) == null) {
pred.next = new Node<K,V>(hash, key,
value, null);
break;
}
}
}
// 4.2 Bin is a red-black tree (TreeBin head).
else if (f instanceof TreeBin) {
Node<K,V> p;
binCount = 2;
// Insert into or update the tree; a non-null return
// means the key already existed.
if ((p = ((TreeBin<K,V>)f).putTreeVal(hash, key,
value)) != null) {
oldVal = p.val;
if (!onlyIfAbsent)
p.val = value;
}
}
}
}
// 5. If the list grew past TREEIFY_THRESHOLD (8), convert the
// bin to a tree (or resize, if the table is still small).
if (binCount != 0) {
if (binCount >= TREEIFY_THRESHOLD)
treeifyBin(tab, i);
if (oldVal != null)
return oldVal; // replaced an existing mapping: size unchanged
break;
}
}
}
// A new mapping was added: bump the count and maybe trigger a resize.
addCount(1L, binCount);
return null;
}
sizeCtl值含义:
-1:表示table正在初始化
-N:表示有N-1个线程正在进行扩容操作
0:表示table还未初始化,默认值
大于0:初始化前表示初始容量,初始化后表示下一次扩容的阈值(即0.75n)
initTable()方法
// Lazily initializes the table, using sizeCtl as the coordination
// variable: exactly one thread CASes sizeCtl to -1 and allocates the
// array; losing threads yield and spin until the table appears.
private final Node<K,V>[] initTable() {
Node<K,V>[] tab; int sc;
while ((tab = table) == null || tab.length == 0) {
// Another thread is initializing (sizeCtl < 0): yield the CPU and spin.
if ((sc = sizeCtl) < 0)
Thread.yield(); // lost initialization race; just spin
// No other thread is initializing: CAS sizeCtl to -1, which acts
// as acquiring the initialization "lock".
else if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) {
try {
if ((tab = table) == null || tab.length == 0) {
// sc > 0 means a capacity was requested via a constructor;
// otherwise use the default capacity (16).
int n = (sc > 0) ? sc : DEFAULT_CAPACITY;
@SuppressWarnings("unchecked")
Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n];
table = tab = nt;
// Next resize threshold: n - n/4 == 0.75 * n, matching
// the traditional load factor.
sc = n - (n >>> 2);
}
} finally {
sizeCtl = sc; // publish the threshold (releases the "lock")
}
break;
}
}
return tab;
}
get()方法
public V get(Object key) {
Node<K,V>[] tab; Node<K,V> e, p; int n, eh; K ek;
//计算key存放Node数组中的数组下标,判断这个数组下标Node数组上是否有Node存在
int h = spread(key.hashCode());
if ((tab = table) != null && (n = tab.length) > 0 &&
(e = tabAt(tab, (n - 1) & h)) != null) {
1.在Node数组中找key相等的Node
if ((eh = e.hash) == h) {
if ((ek = e.key) == key || (ek != null && key.equals(ek)))
return e.val;
}
//2.在红黑树中找key相等的Node
else if (eh < 0)
return (p = e.find(h, key)) != null ? p.val : null;
//3.在链表中找key相等的Node
while ((e = e.next) != null) {
if (e.hash == h &&
((ek = e.key) == key || (ek != null && key.equals(ek))))
return e.val;
}
}
return null;
}
结论
1.JDK1.8ConcurrentHashMap采用数组+单链表+红黑树的数据结构,数组和链表存储的是一个个Node对象,红黑树存储的是TreeNode对象
2.采用了CAS + synchronized来保证并发安全性
3.添加key-value时会根据key值计算出对应的hash值,根据hash值计算出对应的Node数组下标,判断这个数组下标Node数组上是否有Node存在,若不存在,说明没有hash冲突,则表示当前位置可以写入数据,利用CAS尝试写入,失败则自旋保证成功;若存在说明有hash冲突,利用synchronized锁锁住链表或者红黑树的头结点写入数据
ConcurrentHashMap1.8与ConcurrentHashMap1.7的区别:
1.1.7采用数组+链表,1.8采用数组+链表+红黑树,优化了查询速度
2.1.7采用Segment分段锁,1.8采用CAS + synchronized降低锁的粒度:JDK1.7版本锁的粒度是基于Segment的,一个Segment包含多个HashEntry,而JDK1.8锁的粒度是单个Node(链表或红黑树的头结点)
**粗体** _斜体_ [链接](http://example.com) `代码` - 列表 > 引用
。你还可以使用@
来通知其他用户。