Java source code examples: org.apache.hive.hcatalog.streaming.StreamingException
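Before the individual examples, here is a minimal end-to-end sketch of where StreamingException typically surfaces in the legacy org.apache.hive.hcatalog.streaming API (connect to an endpoint, fetch a transaction batch, write, commit). The metastore URI, database, table, partition value and column names are illustrative placeholders, not taken from any of the examples below.
import java.util.Arrays;

import org.apache.hive.hcatalog.streaming.DelimitedInputWriter;
import org.apache.hive.hcatalog.streaming.HiveEndPoint;
import org.apache.hive.hcatalog.streaming.StreamingConnection;
import org.apache.hive.hcatalog.streaming.StreamingException;
import org.apache.hive.hcatalog.streaming.TransactionBatch;

public class HiveStreamingSketch {
    public static void main(String[] args) throws Exception {
        // Placeholder endpoint: metastore URI, database, table and partition value are illustrative.
        HiveEndPoint endPoint = new HiveEndPoint(
                "thrift://metastore-host:9083", "default", "alerts", Arrays.asList("2024"));
        StreamingConnection connection = null;
        TransactionBatch txnBatch = null;
        try {
            connection = endPoint.newConnection(true); // auto-create the partition if missing
            DelimitedInputWriter writer =
                    new DelimitedInputWriter(new String[]{"id", "msg"}, ",", endPoint);
            txnBatch = connection.fetchTransactionBatch(10, writer);
            txnBatch.beginNextTransaction();
            txnBatch.write("1,hello".getBytes());
            txnBatch.commit();
        } catch (StreamingException e) {
            // StreamingException is the common supertype of errors such as
            // SerializationError, ConnectionError and StreamingIOFailure.
            throw new RuntimeException("Hive streaming failed for " + endPoint, e);
        } finally {
            if (txnBatch != null) txnBatch.close();
            if (connection != null) connection.close();
        }
    }
}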
Example 1
/**
* Write the record data to Hive
*
* @throws WriteFailure if the write fails for the current transaction
* @throws SerializationError if the record cannot be serialized
* @throws InterruptedException if the write operation is interrupted
*/
public synchronized void write(final byte[] record)
throws WriteFailure, SerializationError, InterruptedException {
if (closed) {
throw new IllegalStateException("This hive streaming writer was closed " +
"and thus no longer able to write : " + endPoint);
}
// write the tuple
try {
LOG.debug("Writing event to {}", endPoint);
callWithTimeout(new CallRunner<Void>() {
@Override
public Void call() throws StreamingException, InterruptedException {
txnBatch.write(record);
totalRecords++;
return null;
}
});
} catch (SerializationError se) {
throw new SerializationError(endPoint.toString() + " SerializationError", se);
} catch (StreamingException | TimeoutException e) {
throw new WriteFailure(endPoint, txnBatch.getCurrentTxnId(), e);
}
}
Example 2
/**
* Commits the current Txn if totalRecords > 0.
* If 'rollToNext' is true, switches to the next Txn in the batch, or to a
* new TxnBatch if the current Txn batch is exhausted.
*/
public void flush(boolean rollToNext)
throws CommitFailure, TxnBatchFailure, TxnFailure, InterruptedException {
// if there are no records do not call flush
if (totalRecords <= 0) return;
try {
synchronized (txnBatchLock) {
commitTxn();
nextTxn(rollToNext);
totalRecords = 0;
lastUsed = System.currentTimeMillis();
}
} catch (StreamingException e) {
throw new TxnFailure(txnBatch, e);
}
}
Example 3
protected RecordWriter getRecordWriter(HiveEndPoint endPoint, UserGroupInformation ugi, HiveConf hiveConf) throws StreamingException, IOException, InterruptedException {
if (ugi == null) {
return new StrictJsonWriter(endPoint, hiveConf);
} else {
try {
return ugi.doAs((PrivilegedExceptionAction<StrictJsonWriter>) () -> new StrictJsonWriter(endPoint, hiveConf));
} catch (UndeclaredThrowableException e) {
Throwable cause = e.getCause();
if (cause instanceof StreamingException) {
throw (StreamingException) cause;
} else {
throw e;
}
}
}
}
Example 4
/**
* Abort current Txn on all writers
*/
private void abortAllWriters() throws InterruptedException, StreamingException, HiveWriter.TxnBatchFailure {
for (Map.Entry<HiveEndPoint, HiveWriter> entry : allWriters.entrySet()) {
try {
entry.getValue().abort();
} catch (Exception e) {
getLogger().error("Failed to abort hive transaction batch, HiveEndPoint " + entry.getValue() + " due to exception ", e);
}
}
}
Example 5
protected RecordWriter getRecordWriter(HiveEndPoint endPoint, UserGroupInformation ugi, HiveConf hiveConf) throws StreamingException, IOException, InterruptedException {
if (ugi == null) {
return new StrictJsonWriter(endPoint, hiveConf);
} else {
return ugi.doAs((PrivilegedExceptionAction<StrictJsonWriter>) () -> new StrictJsonWriter(endPoint, hiveConf));
}
}
Example 6
protected void commitTxn() throws CommitFailure, InterruptedException {
LOG.debug("Committing Txn id {} to {}", txnBatch.getCurrentTxnId(), endPoint);
try {
callWithTimeout(new CallRunner<Void>() {
@Override
public Void call() throws Exception {
txnBatch.commit(); // could block
return null;
}
});
} catch (StreamingException | TimeoutException e) {
throw new CommitFailure(endPoint, txnBatch.getCurrentTxnId(), e);
}
}
Example 7
protected StreamingConnection newConnection(HiveEndPoint endPoint, boolean autoCreatePartitions, HiveConf conf, UserGroupInformation ugi) throws InterruptedException, ConnectFailure {
try {
return callWithTimeout(() -> {
return endPoint.newConnection(autoCreatePartitions, conf, ugi); // could block
});
} catch (StreamingException | TimeoutException e) {
throw new ConnectFailure(endPoint, e);
}
}
Example 8
protected TransactionBatch nextTxnBatch(final RecordWriter recordWriter)
throws InterruptedException, TxnBatchFailure {
LOG.debug("Fetching new Txn Batch for {}", endPoint);
TransactionBatch batch = null;
try {
batch = callWithTimeout(() -> {
return connection.fetchTransactionBatch(txnsPerBatch, recordWriter); // could block
});
batch.beginNextTransaction();
LOG.debug("Acquired {}. Switching to first txn", batch);
} catch (TimeoutException | StreamingException e) {
throw new TxnBatchFailure(endPoint, e);
}
return batch;
}
Example 9
/**
* Aborts the current Txn and switches to next Txn.
* @throws StreamingException if a new Transaction Batch could not be obtained, or the switch to the next Txn failed
*/
public void abort() throws StreamingException, TxnBatchFailure, InterruptedException {
synchronized (txnBatchLock) {
abortTxn();
nextTxn(true); // roll to next
}
}
Example 10
/**
* If the current txnBatch has remaining transactions, begins the next transaction;
* otherwise closes the batch and, when rollToNext is true, fetches a new TxnBatch.
* @param rollToNext whether to roll to the next transaction / transaction batch
*/
protected void nextTxn(boolean rollToNext) throws StreamingException, InterruptedException, TxnBatchFailure {
if (txnBatch.remainingTransactions() == 0) {
closeTxnBatch();
txnBatch = null;
if (rollToNext) {
txnBatch = nextTxnBatch(recordWriter);
}
} else if (rollToNext) {
LOG.debug("Switching to next Txn for {}", endPoint);
txnBatch.beginNextTransaction(); // does not block
}
}
Example 11
/**
* Execute the callable on a separate thread and wait for completion
* for the specified amount of time in milliseconds. In case of timeout,
* the callable is cancelled and a TimeoutException is thrown.
*/
private <T> T callWithTimeout(final CallRunner<T> callRunner)
throws TimeoutException, StreamingException, InterruptedException {
Future<T> future = callTimeoutPool.submit(() -> {
if (ugi == null) {
return callRunner.call();
}
return ugi.doAs((PrivilegedExceptionAction<T>) () -> callRunner.call());
});
try {
if (callTimeout > 0) {
return future.get(callTimeout, TimeUnit.MILLISECONDS);
} else {
return future.get();
}
} catch (TimeoutException eT) {
future.cancel(true);
throw eT;
} catch (ExecutionException e1) {
Throwable cause = e1.getCause();
if (cause instanceof IOException) {
throw new StreamingIOFailure("I/O Failure", (IOException) cause);
} else if (cause instanceof StreamingException) {
throw (StreamingException) cause;
} else if (cause instanceof InterruptedException) {
throw (InterruptedException) cause;
} else if (cause instanceof RuntimeException) {
throw (RuntimeException) cause;
} else if (cause instanceof TimeoutException) {
throw new StreamingException("Operation Timed Out.", (TimeoutException) cause);
} else {
throw new RuntimeException(e1);
}
}
}
Example 12
@Override
public StreamingConnection load(HiveEndPoint endPoint) throws StageException {
StreamingConnection connection;
try {
connection = endPoint.newConnection(autoCreatePartitions, hiveConf, loginUgi);
} catch (StreamingException | InterruptedException e) {
throw new StageException(Errors.HIVE_09, e.toString(), e);
}
return connection;
}
Example 13
/**
* Abort current Txn on all writers
*/
private void abortAllWriters(Map<HiveEndPoint, HiveWriter> writers) throws InterruptedException, StreamingException, HiveWriter.TxnBatchFailure {
for (Map.Entry<HiveEndPoint, HiveWriter> entry : writers.entrySet()) {
try {
entry.getValue().abort();
} catch (Exception e) {
getLogger().error("Failed to abort hive transaction batch, HiveEndPoint " + entry.getValue() + " due to exception ", e);
}
}
}
Example 14
@Test(expected = HiveWriter.ConnectFailure.class)
public void testRecordWriterStreamingException() throws Exception {
recordWriterCallable = mock(Callable.class);
StreamingException streamingException = new StreamingException("Test Exception");
when(recordWriterCallable.call()).thenThrow(streamingException);
try {
initWriter();
} catch (HiveWriter.ConnectFailure e) {
assertEquals(streamingException, e.getCause());
throw e;
}
}
Example 15
@Override
protected RecordWriter getRecordWriter(HiveEndPoint endPoint, UserGroupInformation ugi, HiveConf conf) throws StreamingException {
assertEquals(hiveConf, conf);
return mock(RecordWriter.class);
}
Example 16
@Override
protected void nextTxn(boolean rollToNext) throws StreamingException, InterruptedException, TxnBatchFailure {
// Empty
}
Example 17
@Override
public RecordWriter createRecordWriter(HiveEndPoint hiveEndPoint) throws StreamingException, IOException, ClassNotFoundException {
List<String> result = fields.stream().map(String::toLowerCase).collect(Collectors.toList());
return new DelimitedInputWriter(result.toArray(new String[0]), fieldDelimiter, hiveEndPoint);
}
Example 18
@Override
public void write(TransactionBatch transactionBatch, Tuple tuple) throws StreamingException, IOException, InterruptedException {
transactionBatch.write(mapRecord(tuple));
}
Example 19
private TransactionBatch getBatch(int batchSize, HiveEndPoint endPoint) throws InterruptedException,
StreamingException, ExecutionException {
return hiveConnectionPool.get(endPoint).fetchTransactionBatch(batchSize, recordWriterPool.get(endPoint));
}
Example 20
@Override
public void abort() throws StreamingException, TxnBatchFailure, InterruptedException {
}