Project: angel
File: CounterUpdater.java
void updateCounters() {
  // Counter names are prefixed with the upper-cased filesystem scheme, e.g. "HDFS_".
  String counterPrefix = schema.toUpperCase() + "_";
  long readBytes = 0;
  long writeBytes = 0;
  long readOps = 0;
  long largeReadOps = 0;
  long writeOps = 0;
  // Aggregate the statistics of every FileSystem instance for this scheme.
  for (FileSystem.Statistics stat : stats) {
    readBytes += stat.getBytesRead();
    writeBytes += stat.getBytesWritten();
    readOps += stat.getReadOps();
    largeReadOps += stat.getLargeReadOps();
    writeOps += stat.getWriteOps();
  }
  PSAgentContext.get().getMetrics()
    .put(counterPrefix + AngelCounter.BYTES_READ, Long.toString(readBytes));
  PSAgentContext.get().getMetrics()
    .put(counterPrefix + AngelCounter.BYTES_WRITTEN, Long.toString(writeBytes));
  PSAgentContext.get().getMetrics()
    .put(counterPrefix + AngelCounter.READ_OPS, Long.toString(readOps));
  PSAgentContext.get().getMetrics()
    .put(counterPrefix + AngelCounter.LARGE_READ_OPS, Long.toString(largeReadOps));
  PSAgentContext.get().getMetrics()
    .put(counterPrefix + AngelCounter.WRITE_OPS, Long.toString(writeOps));
}
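The loop above assumes `stats` already holds the FileSystem.Statistics objects for one scheme. A minimal sketch of how such a list could be gathered; the `statisticsForScheme` helper name is made up for illustration and is not part of the angel code:

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.fs.FileSystem;

// Illustrative helper: collect the Statistics of every FileSystem instance
// whose scheme matches the given one (e.g. "hdfs").
static List<FileSystem.Statistics> statisticsForScheme(String scheme) {
  List<FileSystem.Statistics> matched = new ArrayList<>();
  for (FileSystem.Statistics stat : FileSystem.getAllStatistics()) {
    if (stat.getScheme().equals(scheme)) {
      matched.add(stat);
    }
  }
  return matched;
}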
Project: hadoop-oss
File: FCStatisticsBaseTest.java
@Test
public void testStatistics() throws IOException, URISyntaxException {
  URI fsUri = getFsUri();
  Statistics stats = FileContext.getStatistics(fsUri);
  Assert.assertEquals(0, stats.getBytesRead());
  Path filePath = fileContextTestHelper.getTestRootPath(fc, "file1");
  createFile(fc, filePath, numBlocks, blockSize);

  // Only writes have happened so far; no bytes should have been read yet.
  Assert.assertEquals(0, stats.getBytesRead());
  verifyWrittenBytes(stats);

  FSDataInputStream fstr = fc.open(filePath);
  byte[] buf = new byte[blockSize];
  int bytesRead = fstr.read(buf, 0, blockSize);
  fstr.read(0, buf, 0, blockSize);
  Assert.assertEquals(blockSize, bytesRead);

  verifyReadBytes(stats);
  verifyWrittenBytes(stats);
  verifyReadBytes(FileContext.getStatistics(getFsUri()));

  Map<URI, Statistics> statsMap = FileContext.getAllStatistics();
  URI exactUri = getSchemeAuthorityUri();
  verifyWrittenBytes(statsMap.get(exactUri));
  fc.delete(filePath, true);
}
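verifyReadBytes and verifyWrittenBytes are verification hooks that concrete subclasses of FCStatisticsBaseTest supply per filesystem. A hypothetical pair of overrides for a filesystem with no read-ahead or replication overhead might look like the following; the expected byte counts are illustrative assumptions, not values taken from any real Hadoop test:

// Hypothetical per-filesystem overrides inside a subclass of FCStatisticsBaseTest.
@Override
protected void verifyWrittenBytes(Statistics stats) {
  // createFile wrote numBlocks blocks of blockSize bytes each.
  Assert.assertEquals(numBlocks * blockSize, stats.getBytesWritten());
}

@Override
protected void verifyReadBytes(Statistics stats) {
  // Two reads of blockSize bytes were issued against the file.
  Assert.assertEquals(2 * blockSize, stats.getBytesRead());
}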
Project: hadoop
File: MapTask.java
TrackedRecordReader(TaskReporter reporter, JobConf job)
    throws IOException {
  inputRecordCounter = reporter.getCounter(TaskCounter.MAP_INPUT_RECORDS);
  fileInputByteCounter = reporter.getCounter(FileInputFormatCounter.BYTES_READ);
  this.reporter = reporter;

  List<Statistics> matchedStats = null;
  if (this.reporter.getInputSplit() instanceof FileSplit) {
    matchedStats = getFsStatistics(((FileSplit) this.reporter
        .getInputSplit()).getPath(), job);
  }
  fsStats = matchedStats;

  bytesInPrev = getInputBytes(fsStats);
  rawIn = job.getInputFormat().getRecordReader(reporter.getInputSplit(), job, reporter);
  bytesInCurr = getInputBytes(fsStats);
  fileInputByteCounter.increment(bytesInCurr - bytesInPrev);
}
Project: hadoop
File: MapTask.java
NewTrackingRecordReader(org.apache.hadoop.mapreduce.InputSplit split,
    org.apache.hadoop.mapreduce.InputFormat<K, V> inputFormat,
    TaskReporter reporter,
    org.apache.hadoop.mapreduce.TaskAttemptContext taskContext)
    throws InterruptedException, IOException {
  this.reporter = reporter;
  this.inputRecordCounter = reporter
      .getCounter(TaskCounter.MAP_INPUT_RECORDS);
  this.fileInputByteCounter = reporter
      .getCounter(FileInputFormatCounter.BYTES_READ);

  List<Statistics> matchedStats = null;
  if (split instanceof org.apache.hadoop.mapreduce.lib.input.FileSplit) {
    matchedStats = getFsStatistics(((org.apache.hadoop.mapreduce.lib.input.FileSplit) split)
        .getPath(), taskContext.getConfiguration());
  }
  fsStats = matchedStats;

  long bytesInPrev = getInputBytes(fsStats);
  this.real = inputFormat.createRecordReader(split, taskContext);
  long bytesInCurr = getInputBytes(fsStats);
  fileInputByteCounter.increment(bytesInCurr - bytesInPrev);
}
Project: hadoop
File: MapTask.java
@SuppressWarnings("unchecked")
NewDirectOutputCollector(MRJobConfig jobContext, JobConf job,
    TaskUmbilicalProtocol umbilical, TaskReporter reporter)
    throws IOException, ClassNotFoundException, InterruptedException {
  this.reporter = reporter;
  mapOutputRecordCounter = reporter
      .getCounter(TaskCounter.MAP_OUTPUT_RECORDS);
  fileOutputByteCounter = reporter
      .getCounter(FileOutputFormatCounter.BYTES_WRITTEN);

  List<Statistics> matchedStats = null;
  if (outputFormat instanceof org.apache.hadoop.mapreduce.lib.output.FileOutputFormat) {
    matchedStats = getFsStatistics(org.apache.hadoop.mapreduce.lib.output.FileOutputFormat
        .getOutputPath(taskContext), taskContext.getConfiguration());
  }
  fsStats = matchedStats;

  long bytesOutPrev = getOutputBytes(fsStats);
  out = outputFormat.getRecordWriter(taskContext);
  long bytesOutCurr = getOutputBytes(fsStats);
  fileOutputByteCounter.increment(bytesOutCurr - bytesOutPrev);
}
Project: hadoop
File: MapTask.java
@SuppressWarnings("unchecked")
public void init(MapOutputCollector.Context context)
    throws IOException, ClassNotFoundException {
  this.reporter = context.getReporter();
  JobConf job = context.getJobConf();
  String finalName = getOutputName(getPartition());
  FileSystem fs = FileSystem.get(job);

  OutputFormat<K, V> outputFormat = job.getOutputFormat();
  mapOutputRecordCounter = reporter.getCounter(TaskCounter.MAP_OUTPUT_RECORDS);
  fileOutputByteCounter = reporter
      .getCounter(FileOutputFormatCounter.BYTES_WRITTEN);

  List<Statistics> matchedStats = null;
  if (outputFormat instanceof FileOutputFormat) {
    matchedStats = getFsStatistics(FileOutputFormat.getOutputPath(job), job);
  }
  fsStats = matchedStats;

  long bytesOutPrev = getOutputBytes(fsStats);
  out = job.getOutputFormat().getRecordWriter(fs, job, finalName, reporter);
  long bytesOutCurr = getOutputBytes(fsStats);
  fileOutputByteCounter.increment(bytesOutCurr - bytesOutPrev);
}
Project: hadoop
File: ReduceTask.java
@SuppressWarnings({ "deprecation", "unchecked" })
public OldTrackingRecordWriter(ReduceTask reduce, JobConf job,
    TaskReporter reporter, String finalName) throws IOException {
  this.reduceOutputCounter = reduce.reduceOutputCounter;
  this.fileOutputByteCounter = reduce.fileOutputByteCounter;

  List<Statistics> matchedStats = null;
  if (job.getOutputFormat() instanceof FileOutputFormat) {
    matchedStats = getFsStatistics(FileOutputFormat.getOutputPath(job), job);
  }
  fsStats = matchedStats;

  FileSystem fs = FileSystem.get(job);
  long bytesOutPrev = getOutputBytes(fsStats);
  this.real = job.getOutputFormat().getRecordWriter(fs, job, finalName, reporter);
  long bytesOutCurr = getOutputBytes(fsStats);
  fileOutputByteCounter.increment(bytesOutCurr - bytesOutPrev);
}
Project: hadoop
File: ReduceTask.java
@SuppressWarnings("unchecked")
NewTrackingRecordWriter(ReduceTask reduce,
    org.apache.hadoop.mapreduce.TaskAttemptContext taskContext)
    throws InterruptedException, IOException {
  this.outputRecordCounter = reduce.reduceOutputCounter;
  this.fileOutputByteCounter = reduce.fileOutputByteCounter;

  List<Statistics> matchedStats = null;
  if (reduce.outputFormat instanceof org.apache.hadoop.mapreduce.lib.output.FileOutputFormat) {
    matchedStats = getFsStatistics(org.apache.hadoop.mapreduce.lib.output.FileOutputFormat
        .getOutputPath(taskContext), taskContext.getConfiguration());
  }
  fsStats = matchedStats;

  long bytesOutPrev = getOutputBytes(fsStats);
  this.real = (org.apache.hadoop.mapreduce.RecordWriter<K, V>) reduce.outputFormat
      .getRecordWriter(taskContext);
  long bytesOutCurr = getOutputBytes(fsStats);
  fileOutputByteCounter.increment(bytesOutCurr - bytesOutPrev);
}
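The MapTask/ReduceTask excerpts above all call getFsStatistics, getInputBytes, and getOutputBytes, which are defined elsewhere in the task classes and are not shown in this listing. A sketch of what these helpers look like, reconstructed from the surrounding usage; treat the exact bodies as an approximation rather than the verbatim Hadoop source:

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileSystem.Statistics;
import org.apache.hadoop.fs.Path;

// Collect the Statistics objects whose scheme matches the given path's filesystem.
private static List<Statistics> getFsStatistics(Path path, Configuration conf)
    throws IOException {
  List<Statistics> matchedStats = new ArrayList<Statistics>();
  path = path.getFileSystem(conf).makeQualified(path);
  String scheme = path.toUri().getScheme();
  for (Statistics stats : FileSystem.getAllStatistics()) {
    if (stats.getScheme().equals(scheme)) {
      matchedStats.add(stats);
    }
  }
  return matchedStats;
}

// Sum of bytes read over the matched statistics; 0 if nothing matched.
private long getInputBytes(List<Statistics> stats) {
  if (stats == null) return 0;
  long bytesRead = 0;
  for (Statistics stat : stats) {
    bytesRead += stat.getBytesRead();
  }
  return bytesRead;
}

// Sum of bytes written over the matched statistics; 0 if nothing matched.
private long getOutputBytes(List<Statistics> stats) {
  if (stats == null) return 0;
  long bytesWritten = 0;
  for (Statistics stat : stats) {
    bytesWritten += stat.getBytesWritten();
  }
  return bytesWritten;
}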
Project: hadoop
File: FCStatisticsBaseTest.java
Project: aliyun-oss-hadoop-fs
File: MapTask.java
Project: aliyun-oss-hadoop-fs
File: MapTask.java
Project: aliyun-oss-hadoop-fs
File: MapTask.java
Project: aliyun-oss-hadoop-fs
File: MapTask.java
Project: aliyun-oss-hadoop-fs
File: ReduceTask.java
Project: aliyun-oss-hadoop-fs
File: ReduceTask.java
Project: aliyun-oss-hadoop-fs
File: FCStatisticsBaseTest.java
/**
 * Direct HTTP PUT request without the JOSS package.
 *
 * @param objName name of the object
 * @param contentType content type
 * @return output stream for writing the object's data
 */
@Override
public FSDataOutputStream createObject(String objName, String contentType,
    Map<String, String> metadata, Statistics statistics) throws IOException {
  URL url = new URL(mJossAccount.getAccessURL() + "/" + getURLEncodedObjName(objName));
  LOG.debug("PUT {}. Content-Type : {}", url.toString(), contentType);

  // When overwriting an object, the cached metadata will be outdated.
  String cachedName = getObjName(container + "/", objName);
  objectCache.remove(cachedName);

  try {
    OutputStream sos;
    if (nonStreamingUpload) {
      sos = new SwiftNoStreamingOutputStream(mJossAccount, url, contentType,
          metadata, swiftConnectionManager, this);
    } else {
      sos = new SwiftOutputStream(mJossAccount, url, contentType,
          metadata, swiftConnectionManager);
    }
    return new FSDataOutputStream(sos, statistics);
  } catch (IOException e) {
    LOG.error(e.getMessage());
    throw e;
  }
}
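The statistics object handed to FSDataOutputStream above is the same FileSystem.Statistics counter type aggregated in the earlier snippets. A standalone sketch of that counter API; the StatisticsDemo class name and the "swift" scheme string are placeholders for illustration:

import org.apache.hadoop.fs.FileSystem;

public class StatisticsDemo {
  public static void main(String[] args) {
    // Per-scheme byte/operation counters, as used throughout this listing.
    FileSystem.Statistics stats = new FileSystem.Statistics("swift");
    stats.incrementBytesWritten(4096);  // e.g. recorded by an output stream wrapper
    stats.incrementWriteOps(1);
    stats.incrementBytesRead(1024);     // e.g. recorded by an input stream wrapper
    stats.incrementReadOps(1);
    System.out.println(stats.getScheme() + ": wrote " + stats.getBytesWritten()
        + " bytes, read " + stats.getBytesRead() + " bytes");
  }
}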
Project: big-c
File: MapTask.java
Project: big-c
File: MapTask.java
Project: big-c
File: MapTask.java
Project: big-c
File: MapTask.java
Project: big-c
File: ReduceTask.java
Project: big-c
File: ReduceTask.java
Project: big-c
File: FCStatisticsBaseTest.java
Project: hadoop-2.6.0-cdh5.4.3
File: MapTask.java
Project: hadoop-2.6.0-cdh5.4.3
File: MapTask.java
Project: hadoop-2.6.0-cdh5.4.3
File: MapTask.java
Project: hadoop-2.6.0-cdh5.4.3
File: MapTask.java
Project: hadoop-2.6.0-cdh5.4.3
File: ReduceTask.java
Project: hadoop-2.6.0-cdh5.4.3
File: ReduceTask.java
Project: hadoop-2.6.0-cdh5.4.3
File: FCStatisticsBaseTest.java
Project: hadoop-plus
File: MapTask.java
Project: hadoop-plus
File: MapTask.java
Project: hadoop-plus
File: MapTask.java
Project: hadoop-plus
File: MapTask.java
Project: hadoop-plus
File: ReduceTask.java
Project: hadoop-plus
File: ReduceTask.java
Project: hops
File: FCStatisticsBaseTest.java
Project: FlexMap
File: MapTask.java
Project: FlexMap
File: MapTask.java