Example source code for org.apache.hadoop.fs.FileSystem.Statistics
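
FileSystem.Statistics keeps per-scheme I/O counters (bytes read and written, read/write/large-read operation counts) that Hadoop shares across all FileSystem instances of the same class. Before the project examples, here is a minimal, self-contained sketch of reading those counters directly; the class name StatisticsDemo and the use of the local file:// scheme are illustrative choices, not taken from any of the projects below.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileSystem.Statistics;

public class StatisticsDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Any FileSystem will do; the local file:// scheme keeps the sketch self-contained.
    FileSystem fs = FileSystem.get(URI.create("file:///"), conf);

    // Statistics are keyed by scheme and FileSystem class, shared across instances.
    Statistics stats = FileSystem.getStatistics("file", fs.getClass());

    System.out.println("bytes read:    " + stats.getBytesRead());
    System.out.println("bytes written: " + stats.getBytesWritten());
    System.out.println("read ops:      " + stats.getReadOps());
    System.out.println("large reads:   " + stats.getLargeReadOps());
    System.out.println("write ops:     " + stats.getWriteOps());
  }
}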

Project: angel    File: CounterUpdater.java
void updateCounters() {
  String counterPrefix = schema.toUpperCase() + "_";
  long readBytes = 0;
  long writeBytes = 0;
  long readOps = 0;
  long largeReadOps = 0;
  long writeOps = 0;
  for (FileSystem.Statistics stat : stats) {
    readBytes = readBytes + stat.getBytesRead();
    writeBytes = writeBytes + stat.getBytesWritten();
    readOps = readOps + stat.getReadOps();
    largeReadOps = largeReadOps + stat.getLargeReadOps();
    writeOps = writeOps + stat.getWriteOps();
  }
  PSAgentContext.get().getMetrics()
      .put(counterPrefix + AngelCounter.BYTES_READ, Long.toString(readBytes));
  PSAgentContext.get().getMetrics()
      .put(counterPrefix + AngelCounter.BYTES_WRITTEN, Long.toString(writeBytes));
  PSAgentContext.get().getMetrics()
      .put(counterPrefix + AngelCounter.READ_OPS, Long.toString(readOps));
  PSAgentContext.get().getMetrics()
      .put(counterPrefix + AngelCounter.LARGE_READ_OPS, Long.toString(largeReadOps));
  PSAgentContext.get().getMetrics()
      .put(counterPrefix + AngelCounter.WRITE_OPS, Long.toString(writeOps));
}
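
The stats collection iterated above is populated elsewhere in CounterUpdater; one plausible way to gather it (a hypothetical helper, not angel's actual code) is to take every Statistics instance registered in the JVM via FileSystem.getAllStatistics():

import java.util.List;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileSystem.Statistics;

// Hypothetical helper: one Statistics object exists per (scheme, FileSystem class)
// registered in this JVM, so this returns counters for every filesystem in use.
static List<Statistics> collectStats() {
  return FileSystem.getAllStatistics();
}
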
Project: hadoop-oss    File: FCStatisticsBaseTest.java
@Test
public void testStatistics() throws IOException, URISyntaxException {
  URI fsUri = getFsUri();
  Statistics stats = FileContext.getStatistics(fsUri);
  Assert.assertEquals(0, stats.getBytesRead());
  Path filePath = fileContextTestHelper.getTestRootPath(fc, "file1");
  createFile(fc, filePath, numBlocks, blockSize);

  Assert.assertEquals(0, stats.getBytesRead());
  verifyWrittenBytes(stats);
  FSDataInputStream fstr = fc.open(filePath);
  byte[] buf = new byte[blockSize];
  int bytesRead = fstr.read(buf, 0, blockSize);
  fstr.read(0, buf, 0, blockSize);
  Assert.assertEquals(blockSize, bytesRead);
  verifyReadBytes(stats);
  verifyWrittenBytes(stats);
  verifyReadBytes(FileContext.getStatistics(getFsUri()));
  Map<URI, Statistics> statsMap = FileContext.getAllStatistics();
  URI exactUri = getSchemeAuthorityUri();
  verifyWrittenBytes(statsMap.get(exactUri));
  fc.delete(filePath, true);
}
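
verifyReadBytes and verifyWrittenBytes are hooks that each concrete filesystem test supplies; as a hedged sketch of the kind of check such a hook performs (not the actual hadoop-oss implementation, and verifyReadBytesSketch is a made-up name):

// Hypothetical check: after the streaming read and the positioned read above,
// the test expects both reads to show up in the Statistics byte counter.
void verifyReadBytesSketch(Statistics stats, int blockSize) {
  Assert.assertEquals(2 * blockSize, stats.getBytesRead());
}
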
Project: hadoop    File: MapTask.java
TrackedRecordReader(TaskReporter reporter, JobConf job)
  throws IOException {
  inputRecordCounter = reporter.getCounter(TaskCounter.MAP_INPUT_RECORDS);
  fileInputByteCounter = reporter.getCounter(FileInputFormatCounter.BYTES_READ);
  this.reporter = reporter;

  List<Statistics> matchedStats = null;
  if (this.reporter.getInputSplit() instanceof FileSplit) {
    matchedStats = getFsStatistics(((FileSplit) this.reporter
        .getInputSplit()).getPath(), job);
  }
  fsStats = matchedStats;

  bytesInPrev = getInputBytes(fsStats);
  rawIn = job.getInputFormat().getRecordReader(reporter.getInputSplit(), job, reporter);
  bytesInCurr = getInputBytes(fsStats);
  fileInputByteCounter.increment(bytesInCurr - bytesInPrev);
}
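
The getFsStatistics helper used here lives in Task.java (not shown on this page); roughly, it qualifies the path, takes its scheme, and keeps only the registered Statistics objects with a matching scheme. A sketch of that logic:

// Sketch of the scheme-matching done by getFsStatistics(path, conf).
protected static List<Statistics> getFsStatistics(Path path, Configuration conf)
    throws IOException {
  List<Statistics> matchedStats = new ArrayList<Statistics>();
  path = path.getFileSystem(conf).makeQualified(path);
  String scheme = path.toUri().getScheme();
  for (Statistics stats : FileSystem.getAllStatistics()) {
    if (stats.getScheme().equals(scheme)) {
      matchedStats.add(stats);
    }
  }
  return matchedStats;
}
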
Project: hadoop    File: MapTask.java
NewTrackingRecordReader(org.apache.hadoop.mapreduce.InputSplit split,
    org.apache.hadoop.mapreduce.InputFormat<K, V> inputFormat,
    TaskReporter reporter,
    org.apache.hadoop.mapreduce.TaskAttemptContext taskContext)
    throws InterruptedException, IOException {
  this.reporter = reporter;
  this.inputRecordCounter = reporter
      .getCounter(TaskCounter.MAP_INPUT_RECORDS);
  this.fileInputByteCounter = reporter
      .getCounter(FileInputFormatCounter.BYTES_READ);

  List<Statistics> matchedStats = null;
  if (split instanceof org.apache.hadoop.mapreduce.lib.input.FileSplit) {
    matchedStats = getFsStatistics(((org.apache.hadoop.mapreduce.lib.input.FileSplit) split)
        .getPath(), taskContext.getConfiguration());
  }
  fsStats = matchedStats;

  long bytesInPrev = getInputBytes(fsStats);
  this.real = inputFormat.createRecordReader(split, taskContext);
  long bytesInCurr = getInputBytes(fsStats);
  fileInputByteCounter.increment(bytesInCurr - bytesInPrev);
}
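
Both readers bracket the createRecordReader call with getInputBytes, which just sums getBytesRead over the matched statistics; getOutputBytes in the collectors below does the same with getBytesWritten. A sketch:

// Sketch of the summing helper used to compute bytesInPrev / bytesInCurr above.
private long getInputBytes(List<Statistics> stats) {
  if (stats == null) {
    return 0;
  }
  long bytesRead = 0;
  for (Statistics stat : stats) {
    bytesRead += stat.getBytesRead();
  }
  return bytesRead;
}
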
Project: hadoop    File: MapTask.java
@SuppressWarnings("unchecked")
NewDirectOutputCollector(MRJobConfig jobContext, JobConf job,
    TaskUmbilicalProtocol umbilical, TaskReporter reporter)
    throws IOException, ClassNotFoundException, InterruptedException {
  this.reporter = reporter;
  mapOutputRecordCounter = reporter
      .getCounter(TaskCounter.MAP_OUTPUT_RECORDS);
  fileOutputByteCounter = reporter
      .getCounter(FileOutputFormatCounter.BYTES_WRITTEN);

  List<Statistics> matchedStats = null;
  if (outputFormat instanceof org.apache.hadoop.mapreduce.lib.output.FileOutputFormat) {
    matchedStats = getFsStatistics(org.apache.hadoop.mapreduce.lib.output.FileOutputFormat
        .getOutputPath(taskContext), taskContext.getConfiguration());
  }
  fsStats = matchedStats;

  long bytesOutPrev = getOutputBytes(fsStats);
  out = outputFormat.getRecordWriter(taskContext);
  long bytesOutCurr = getOutputBytes(fsStats);
  fileOutputByteCounter.increment(bytesOutCurr - bytesOutPrev);
}
Project: hadoop    File: MapTask.java
@SuppressWarnings("unchecked")
public void init(MapOutputCollector.Context context
                ) throws IOException, ClassNotFoundException {
  this.reporter = context.getReporter();
  JobConf job = context.getJobConf();
  String finalName = getOutputName(getPartition());
  FileSystem fs = FileSystem.get(job);

  OutputFormat<K, V> outputFormat = job.getOutputFormat();
  mapOutputRecordCounter = reporter.getCounter(TaskCounter.MAP_OUTPUT_RECORDS);

  fileOutputByteCounter = reporter
      .getCounter(FileOutputFormatCounter.BYTES_WRITTEN);

  List<Statistics> matchedStats = null;
  if (outputFormat instanceof FileOutputFormat) {
    matchedStats = getFsStatistics(FileOutputFormat.getOutputPath(job), job);
  }
  fsStats = matchedStats;

  long bytesOutPrev = getOutputBytes(fsStats);
  out = job.getOutputFormat().getRecordWriter(fs, job, finalName, reporter);
  long bytesOutCurr = getOutputBytes(fsStats);
  fileOutputByteCounter.increment(bytesOutCurr - bytesOutPrev);
}
Project: hadoop    File: ReduceTask.java
@SuppressWarnings({ "deprecation", "unchecked" })
public OldTrackingRecordWriter(ReduceTask reduce, JobConf job,
    TaskReporter reporter, String finalName) throws IOException {
  this.reduceOutputCounter = reduce.reduceOutputCounter;
  this.fileOutputByteCounter = reduce.fileOutputByteCounter;
  List<Statistics> matchedStats = null;
  if (job.getOutputFormat() instanceof FileOutputFormat) {
    matchedStats = getFsStatistics(FileOutputFormat.getOutputPath(job), job);
  }
  fsStats = matchedStats;

  FileSystem fs = FileSystem.get(job);
  long bytesOutPrev = getOutputBytes(fsStats);
  this.real = job.getOutputFormat().getRecordWriter(fs, job, finalName, reporter);
  long bytesOutCurr = getOutputBytes(fsStats);
  fileOutputByteCounter.increment(bytesOutCurr - bytesOutPrev);
}
Project: hadoop    File: ReduceTask.java
@SuppressWarnings("unchecked")
NewTrackingRecordWriter(ReduceTask reduce,
    org.apache.hadoop.mapreduce.TaskAttemptContext taskContext)
    throws InterruptedException, IOException {
  this.outputRecordCounter = reduce.reduceOutputCounter;
  this.fileOutputByteCounter = reduce.fileOutputByteCounter;

  List<Statistics> matchedStats = null;
  if (reduce.outputFormat instanceof org.apache.hadoop.mapreduce.lib.output.FileOutputFormat) {
    matchedStats = getFsStatistics(org.apache.hadoop.mapreduce.lib.output.FileOutputFormat
        .getOutputPath(taskContext), taskContext.getConfiguration());
  }

  fsStats = matchedStats;

  long bytesOutPrev = getOutputBytes(fsStats);
  this.real = (org.apache.hadoop.mapreduce.RecordWriter<K, V>) reduce.outputFormat
      .getRecordWriter(taskContext);
  long bytesOutCurr = getOutputBytes(fsStats);
  fileOutputByteCounter.increment(bytesOutCurr - bytesOutPrev);
}
Project: stocator    File: SwiftAPIClient.java
/**
 * Direct HTTP PUT request without JOSS package
 *
 * @param objName name of the object
 * @param contentType content type
 * @param metadata object metadata
 * @param statistics filesystem statistics to update
 * @return output stream for the object data
 */
@Override
public FSDataOutputStream createObject(String objName, String contentType,
    Map<String, String> metadata, Statistics statistics) throws IOException {
  URL url = new URL(mJossAccount.getAccessURL() + "/" + getURLEncodedObjName(objName));
  LOG.debug("PUT {}. Content-Type : {}", url.toString(), contentType);

  // When overwriting an object, cached metadata will be outdated
  String cachedName = getObjName(container + "/", objName);
  objectCache.remove(cachedName);

  try {
    OutputStream sos;
    if (nonStreamingUpload) {
      sos = new SwiftNoStreamingOutputStream(mJossAccount, url, contentType,
          metadata, swiftConnectionManager, this);
    } else {
      sos = new SwiftOutputStream(mJossAccount, url, contentType,
          metadata, swiftConnectionManager);
    }
    return new FSDataOutputStream(sos, statistics);
  } catch (IOException e) {
    LOG.error(e.getMessage());
    throw e;
  }
}
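
The key line for statistics is the last one in the try block: wrapping the raw Swift stream in FSDataOutputStream together with the caller's Statistics object means every write() is also credited to that counter. A minimal, self-contained sketch of this effect, using an in-memory stream as a stand-in for the Swift stream (WrapDemo and the "swift" scheme label are illustrative):

import java.io.ByteArrayOutputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem.Statistics;

public class WrapDemo {
  public static void main(String[] args) throws Exception {
    Statistics statistics = new Statistics("swift");          // scheme name is illustrative
    ByteArrayOutputStream raw = new ByteArrayOutputStream();  // stand-in for the Swift stream

    try (FSDataOutputStream out = new FSDataOutputStream(raw, statistics)) {
      out.write(new byte[1024]);
    }
    // The wrapper credited the 1024 bytes to the Statistics object.
    System.out.println("bytes written: " + statistics.getBytesWritten());
  }
}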