hadoop ReadWriteDiskValidatorMetrics source code

  • 2022-10-20

hadoop ReadWriteDiskValidatorMetrics code

File path: /hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ReadWriteDiskValidatorMetrics.java
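
ReadWriteDiskValidatorMetrics holds the per-directory metrics produced by ReadWriteDiskValidator: a disk-failure counter, a gauge recording the time of the last failure, and read/write latency quantiles over three sliding windows (1 hour, 1 day, and 10 days), which the constructor registers under the names readLatency3600s/writeLatency3600s, readLatency86400s/writeLatency86400s, and readLatency864000s/writeLatency864000s. Instances are cached in the DIR_METRICS map and registered with the DefaultMetricsSystem under a source name of the form ReadWriteDiskValidatorMetrics,dir=<dirName>.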

/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.util;

import org.apache.hadoop.classification.VisibleForTesting;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.metrics2.MetricsInfo;
import org.apache.hadoop.metrics2.MetricsSystem;
import org.apache.hadoop.metrics2.annotation.Metric;
import org.apache.hadoop.metrics2.lib.*;

import java.util.HashMap;
import java.util.Map;

import static org.apache.hadoop.metrics2.lib.Interns.info;

/**
 * The metrics for a directory generated by {@link ReadWriteDiskValidator}.
 */
@InterfaceAudience.Private
public class ReadWriteDiskValidatorMetrics {
  @Metric("# of disk failure") MutableCounterInt failureCount;
  @Metric("Time of last failure") MutableGaugeLong lastFailureTime;

  private final MetricsRegistry registry;
  private static final MetricsInfo RECORD_INFO =
      info("ReadWriteDiskValidatorMetrics", "Metrics for the DiskValidator");

  private final int[] quantileIntervals = new int[] {
      60 * 60, // 1h
      24 * 60 * 60, // 1 day
      10 * 24 * 60 * 60 // 10 days
  };
  private final MutableQuantiles[] fileReadQuantiles;
  private final MutableQuantiles[] fileWriteQuantiles;

  public ReadWriteDiskValidatorMetrics() {
    registry = new MetricsRegistry(RECORD_INFO);

    fileReadQuantiles = new MutableQuantiles[quantileIntervals.length];
    for (int i = 0; i < fileReadQuantiles.length; i++) {
      int interval = quantileIntervals[i];
      fileReadQuantiles[i] = registry.newQuantiles(
          "readLatency" + interval + "s",
          "File read latency", "Ops", "latencyMicros", interval);
    }

    fileWriteQuantiles = new MutableQuantiles[quantileIntervals.length];
    for (int i = 0; i < fileWriteQuantiles.length; i++) {
      int interval = quantileIntervals[i];
      fileWriteQuantiles[i] = registry.newQuantiles(
          "writeLatency" + interval + "s",
          "File write latency", "Ops", "latencyMicros", interval);
    }
  }

  /**
   * Simple metrics cache to help prevent re-registrations and help to access
   * metrics.
   */
  protected final static Map<String, ReadWriteDiskValidatorMetrics> DIR_METRICS
      = new HashMap<>();

  /**
   * Get a metric by given directory name.
   *
   * @param dirName directory name
   * @return the metric
   */
  public synchronized static ReadWriteDiskValidatorMetrics getMetric(
      String dirName) {
    MetricsSystem ms = DefaultMetricsSystem.instance();

    ReadWriteDiskValidatorMetrics metrics = DIR_METRICS.get(dirName);
    if (metrics == null) {
      metrics = new ReadWriteDiskValidatorMetrics();

      // Register with the MetricsSystems
      if (ms != null) {
        metrics = ms.register(sourceName(dirName),
            "Metrics for directory: " + dirName, metrics);
      }
      DIR_METRICS.put(dirName, metrics);
    }

    return metrics;
  }

  /**
   * Add the file write latency to {@link MutableQuantiles} metrics.
   *
   * @param writeLatency file write latency in microseconds
   */
  public void addWriteFileLatency(long writeLatency) {
    if (fileWriteQuantiles != null) {
      for (MutableQuantiles q : fileWriteQuantiles) {
        q.add(writeLatency);
      }
    }
  }

  /**
   * Add the file read latency to {@link MutableQuantiles} metrics.
   *
   * @param readLatency file read latency in microseconds
   */
  public void addReadFileLatency(long readLatency) {
    if (fileReadQuantiles != null) {
      for (MutableQuantiles q : fileReadQuantiles) {
        q.add(readLatency);
      }
    }
  }

  /**
   * Get a source name by given directory name.
   *
   * @param dirName directory name
   * @return the source name
   */
  protected static String sourceName(String dirName) {
    StringBuilder sb = new StringBuilder(RECORD_INFO.name());
    sb.append(",dir=").append(dirName);
    return sb.toString();
  }

  /**
   * Increase the failure count and update the last failure timestamp.
   */
  public void diskCheckFailed() {
    failureCount.incr();
    lastFailureTime.set(System.nanoTime());
  }

  /**
   * Get {@link MutableQuantiles} metrics for the file read time.
   *
   * @return {@link MutableQuantiles} metrics for the file read time
   */
  @VisibleForTesting
  protected MutableQuantiles[] getFileReadQuantiles() {
    return fileReadQuantiles;
  }

  /**
   * Get {@link MutableQuantiles} metrics for the file write time.
   *
   * @return {@link MutableQuantiles} metrics for the file write time
   */
  @VisibleForTesting
  protected MutableQuantiles[] getFileWriteQuantiles() {
    return fileWriteQuantiles;
  }
}
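
For reference, here is a minimal usage sketch (not part of the Hadoop source; the directory path and the example class name are hypothetical) showing how a caller such as ReadWriteDiskValidator could obtain the cached per-directory metrics, record a write latency in microseconds, and report a failed check:

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.util.ReadWriteDiskValidatorMetrics;

public class DiskMetricsUsageSketch {
  public static void main(String[] args) {
    // Look up (or lazily create and register) the metrics source
    // for a directory; "/data/disk1" is a hypothetical path.
    ReadWriteDiskValidatorMetrics metrics =
        ReadWriteDiskValidatorMetrics.getMetric("/data/disk1");

    // Time an I/O operation and record its latency in microseconds,
    // the unit documented by addWriteFileLatency.
    long start = System.nanoTime();
    // ... perform the disk write being validated ...
    long micros = TimeUnit.NANOSECONDS.toMicros(System.nanoTime() - start);
    metrics.addWriteFileLatency(micros);

    // On a failed check, bump the failure counter and update the
    // last-failure gauge.
    metrics.diskCheckFailed();
  }
}

Because getMetric() caches instances in DIR_METRICS, repeated calls with the same directory name return the same registered metrics object rather than re-registering a new source.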
