InfluxDB集群 -- /write写入数据源码分析(一)

a朋

Client通过POST /write向influxdb集群写入时序数据:

curl -i -XPOST 'http://localhost:8086/write?db=mydb' --data-binary 'cpu_load_short,host=server01,region=us-west value=0.64 1434055562000000000'

influxdb集群中的数据分shard在不同的节点上存储,client写入时序数据时(单条或批量):

  • 有的数据需要写入当前节点;
  • 有的数据需要写入远端节点;
  • 在写入时,所有shard都写入成功时,才认为该写入请求成功。

整体流程:

  • node1在8086上接收/write请求,然后根据写入的数据,确定数据存储到shard1和shard2上;
  • shard1和shard2都写入成功,才算数据写入成功;
  • 集群情况下,每个shard至少有2个replica,假设有2个replica,shard1存储到node1和node2上两份副本;
  • shard1有2个replica,shard1写入成功与request中传入的consistency有关;

consistency: 写入的一致性级别

consistency参数,由client在request中传入,标识了shard有N个replica的情况下,如何确定shard是否写入成功。

如果client没有传入consistency参数,server端默认ConsistencyLevelOne,即只要一个replica写入OK,就返回client成功。

consistency参数:

  • all: 所有的replica都写入成功则返回成功;
  • quorum: 大多数的replica写入成功,则返回成功;
  • one: 任何一个replica写入成功,则返回成功;
  • any: 任何一个replica写入成功,或者被写入Hinted-Handoff缓存,则返回成功;

以3节点,2replica为例:

Level      required
all        2
quorum     2
one        1
any        1
// cluster/points_writer.go
// writeToShard computes `required` — the number of replica writes that must
// succeed for the requested consistency level. (Excerpt; body elided below.)
func (w *PointsWriter) writeToShard(shard *meta.ShardInfo, database, retentionPolicy string,
    consistency models.ConsistencyLevel, points []models.Point) error {
    // The required number of writes to achieve the requested consistency level.
    // Default (and "all"): every owner/replica of the shard must succeed.
    required := len(shard.Owners)
    switch consistency {
    // any/one: a single successful replica write is enough.
    case models.ConsistencyLevelAny, models.ConsistencyLevelOne:
        required = 1
    // quorum: a strict majority of the replicas must succeed.
    case models.ConsistencyLevelQuorum:
        required = required/2 + 1
    }
    ......
}

数据写入:代码流程

POST /write的处理入口:

// services/httpd/handler.go
// NewHandler returns a new instance of handler with routes.
func NewHandler(c Config) *Handler {
    h := &Handler{
        mux:            pat.New(),
        Config:         &c,
        Logger:         zap.NewNop(),
        CLFLogger:      log.New(os.Stderr, "[httpd] ", 0),
        Store:          storage.NewStore(),
        stats:          &Statistics{},
        requestTracker: NewRequestTracker(),
        // Semaphore bounding concurrent request handling to 100.
        sema:           make(chan struct{}, 100),
    }
    h.AddRoutes([]Route{
        ......
        // POST /write is dispatched to h.serveWrite.
        Route{
            "write", // Data-ingest route.
            "POST", "/write", true, writeLogEnabled, h.serveWrite,
        },
        ......
    }...)
    return h
}

具体的写操作在h.serveWrite():

// services/httpd/handler.go
// serveWrite receives incoming series data in line protocol format and writes it to the database.
func (h *Handler) serveWrite(w http.ResponseWriter, r *http.Request, user meta.User) {
    ......
    // Target database comes from the ?db= query parameter and is mandatory.
    database := r.URL.Query().Get("db")
    if database == "" {
        h.httpError(w, "database is required", http.StatusBadRequest)
        return
    }
    // The database must already exist; the write path does not auto-create it.
    if di := h.MetaClient.Database(database); di == nil {
        h.httpError(w, fmt.Sprintf("database not found: %q", database), http.StatusNotFound)
        return
    }
    // Read the whole request body into a buffer.
    // NOTE(review): `bs` is not declared in this excerpt — presumably a
    // preallocated/pooled byte slice in the full source; verify upstream.
    body := r.Body
    buf := bytes.NewBuffer(bs)
    _, err := buf.ReadFrom(body)

    // Parse the line-protocol payload; ?precision= selects the timestamp unit.
    points, parseError := models.ParsePointsWithPrecision(buf.Bytes(), time.Now().UTC(), r.URL.Query().Get("precision"))

    level := r.URL.Query().Get("consistency")
    // Default consistency level when the client sends none: one replica
    // write is enough for the request to succeed.
    consistency := models.ConsistencyLevelOne
    if level != "" {
        var err error
        consistency, err = models.ParseConsistencyLevel(level)
    }
    // Write the points; client-side errors (bad input) are reported as 400.
    if err := h.PointsWriter.WritePoints(database, r.URL.Query().Get("rp"), consistency, user, points); influxdb.IsClientError(err) {
        atomic.AddInt64(&h.stats.PointsWrittenFail, int64(len(points)))
        h.httpError(w, err.Error(), http.StatusBadRequest)
        return
    }
    atomic.AddInt64(&h.stats.PointsWrittenOK, int64(len(points)))
    // Success responds with 204 No Content.
    h.writeHeader(w, http.StatusNoContent)
}

由h.PointsWriter.WritePoints()写入points数据,代码走到cluster.WritePoints():

// cluster/points_writer.go
// WritePoints writes the points to the cluster. The user argument is accepted
// for interface compatibility but unused here; the call delegates directly to
// WritePointsPrivileged.
func (w *PointsWriter) WritePoints(database, retentionPolicy string, consistencyLevel models.ConsistencyLevel, user meta.User, points []models.Point) error {
    return w.WritePointsPrivileged(database, retentionPolicy, consistencyLevel, points)
}

写points:

  • 先查询points要存入的shard,需要metaClient读元数据;
  • 每个shard启动1个goroutine写points数据;
  • 若某个shard写错误,则返回err;仅所有shard都写入成功,返回成功;
// cluster/points_writer.go
// WritePointsPrivileged maps the points to their target shards, writes each
// shard in its own goroutine, and fails the whole request on the first shard
// error; it returns nil only when every shard write succeeds.
func (w *PointsWriter) WritePointsPrivileged(database, retentionPolicy string, consistencyLevel models.ConsistencyLevel, points []models.Point) error {
    ...
    // Ask the meta layer which shard each point belongs to.
    shardMappings, err := w.MapShards(&WritePointsRequest{Database: database, RetentionPolicy: retentionPolicy, Points: points})

    // Buffered with one slot per shard so writer goroutines never block on send.
    ch := make(chan error, len(shardMappings.Points))
    for shardID, points := range shardMappings.Points {
        // One goroutine per shard write.
        go func(shard *meta.ShardInfo, database, retentionPolicy string, points []models.Point) {
            ch <- w.writeToShard(shard, database, retentionPolicy, consistencyLevel, points)
        }(shardMappings.Shards[shardID], database, retentionPolicy, points)
    }
    ...
    // Collect exactly one result per shard.
    for range shardMappings.Points {
        select {
        case <-w.closing:
            return ErrWriteFailed
        // If any shard reports an error, the whole write fails with it.
        case err := <-ch:
            if err != nil {
                return err
            }
        }
    }
    return nil
}

写shard的流程:

  • 在写入shard时,由于shard有N个replica,写入成功依赖于consistencyLevel参数:

    • 根据shard owner和consistency计算required;
    • 最后write >= required,则认为本地写入成功;
  • 在集群场景下,某个shard可能归属于不同的node,也就是有不同的owner,每个owner都要写;
  • 如果当前节点是shard owner,使用TSDBStore.WriteToShard()执行本地写;
  • 如果远端节点是shard owner,则使用shardWriter.WriteShard()执行远端写;

    • 如果远端节点写入失败,则存入本机的hinted-handoff队列;
    • 后面等远端节点恢复时,再将队列中的数据写入远端节点;
// cluster/points_writer.go
// writeToShard writes points to every owner (replica) of a single shard and
// returns nil as soon as the number of SUCCESSFUL replica writes reaches the
// count demanded by the requested consistency level.
func (w *PointsWriter) writeToShard(shard *meta.ShardInfo, database, retentionPolicy string,
    consistency models.ConsistencyLevel, points []models.Point) error {
    // required = number of replica writes that must succeed.
    required := len(shard.Owners)
    ...
    switch consistency {
    case models.ConsistencyLevelAny, models.ConsistencyLevelOne:
        required = 1
    case models.ConsistencyLevelQuorum:
        required = required/2 + 1
    }
    ...
    // Buffered with one slot per owner so writer goroutines never block.
    ch := make(chan *AsyncWriteResult, len(shard.Owners))
    for _, owner := range shard.Owners {
        go func(shardID uint64, owner meta.ShardOwner, points []models.Point) {
            // Local write: this node owns a replica of the shard.
            if w.Node.GetDataID() == owner.NodeID {
                err := w.TSDBStore.WriteToShard(shardID, points)
                // Lazily create the shard on first write, then retry once.
                if err == tsdb.ErrShardNotFound {
                    err = w.TSDBStore.CreateShard(database, retentionPolicy, shardID, true)
                    ...
                    err = w.TSDBStore.WriteToShard(shardID, points)
                }
                ch <- &AsyncWriteResult{owner, err}
                return
            }
            // Remote write: the replica lives on another node.
            err := w.ShardWriter.WriteShard(shardID, owner.NodeID, points)
            if err != nil {
                // Remote write failed: queue the points into the local
                // hinted-handoff store for later replay to that node.
                hherr := w.HintedHandoff.WriteShard(shardID, owner.NodeID, points)
                ...
            }
            ch <- &AsyncWriteResult{owner, err}
        }(shard.ID, owner, points)
    }
    var wrote int
    for range shard.Owners {
        select {
        case result := <-ch:
            // BUGFIX: only successful replica writes may count toward
            // `required`. The original excerpt incremented `wrote` for
            // every received result, including failures, so a shard could
            // be reported as written even when every replica write failed.
            if result.Err != nil {
                continue
            }
            wrote++
            if wrote >= required {
                return nil
            }
        }
    }
    ...
    // Not enough replicas acknowledged the write.
    return ErrWriteFailed
}
阅读 328
10 声望
5 粉丝
0 条评论
10 声望
5 粉丝
文章目录
宣传栏