Java source code examples: com.datatorrent.api.Context
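The examples below are drawn from Apache Apex (DataTorrent) projects and show the recurring uses of com.datatorrent.api.Context: operators reading attributes from their OperatorContext or PortContext in setup(), and applications setting DAG-, operator-, and port-level attributes while populating the DAG. As orientation, here is a minimal sketch of both sides of that pattern; the class names SampleOperator and SampleApp are illustrative, not taken from any example below.

import org.apache.hadoop.conf.Configuration;

import com.datatorrent.api.Context;
import com.datatorrent.api.DAG;
import com.datatorrent.api.DefaultOutputPort;
import com.datatorrent.api.InputOperator;
import com.datatorrent.api.StreamingApplication;

public class ContextSketch
{
  // Hypothetical input operator that reads an attribute from its OperatorContext.
  public static class SampleOperator implements InputOperator
  {
    public final transient DefaultOutputPort<String> out = new DefaultOutputPort<>();
    private transient int appWindowCount;

    @Override
    public void setup(Context.OperatorContext context)
    {
      // Attribute lookups cascade: operator-level values override DAG-level defaults.
      appWindowCount = context.getValue(Context.OperatorContext.APPLICATION_WINDOW_COUNT);
    }

    @Override
    public void beginWindow(long windowId)
    {
    }

    @Override
    public void emitTuples()
    {
    }

    @Override
    public void endWindow()
    {
    }

    @Override
    public void teardown()
    {
    }
  }

  // Hypothetical application that sets attributes at the DAG and operator level.
  public static class SampleApp implements StreamingApplication
  {
    @Override
    public void populateDAG(DAG dag, Configuration conf)
    {
      SampleOperator op = dag.addOperator("Sample", new SampleOperator());
      dag.setAttribute(Context.DAGContext.STREAMING_WINDOW_SIZE_MILLIS, 500);
      dag.setAttribute(op, Context.OperatorContext.APPLICATION_WINDOW_COUNT, 10);
    }
  }
}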
Example 1
@Override
public void setup(Context.OperatorContext context)
{
  AggregatorRegistry.DEFAULT_AGGREGATOR_REGISTRY.setup();
  schema = new DimensionalSchema(new DimensionalConfigurationSchema(eventSchemaJSON,
      AggregatorRegistry.DEFAULT_AGGREGATOR_REGISTRY));
  maxProductId = 100;
  maxCustomerId = 30;
  maxChannelId = schema.getDimensionalConfigurationSchema().getKeysToEnumValuesList().get(KEY_CHANNEL).size();
  maxRegionId = schema.getDimensionalConfigurationSchema().getKeysToEnumValuesList().get(KEY_REGION).size();
  tuplesPerCurrentWindow = maxTuplesPerWindow;
  generateDiscounts();
  generateRegionalTax();
  initializeDataGenerators();
}
Example 2
@Override
public void setup(Context.OperatorContext context)
{
  super.setup(context);
  try {
    loader = new FileLoader(filePath, backupResource);
    reloadData();
    lastScanTimeStamp = System.currentTimeMillis();
    if (updateKeys != null) {
      keyList = Lists.newArrayList(updateKeys.split(","));
    }
  } catch (IOException ex) {
    // Propagate the original cause instead of swallowing it.
    throw new RuntimeException("Failed to load mappings from the file.", ex);
  }
}
Example 3
@Override
public void populateDAG(DAG dag, Configuration conf)
{
  dag.setAttribute(DAG.APPLICATION_NAME, "HDHTBenchmarkApplication");
  Generator gen = dag.addOperator("Generator", new Generator());
  gen.setTupleBlast(1000);
  gen.setSleepms(0);
  dag.getOperatorMeta("Generator").getAttributes().put(Context.OperatorContext.APPLICATION_WINDOW_COUNT, 1);
  HDSOperator hdsOut = dag.addOperator("Store", new HDSOperator());
  TFileImpl.DTFileImpl hdsFile = new TFileImpl.DTFileImpl();
  hdsFile.setBasePath("WALBenchMarkDir");
  hdsOut.setFileStore(hdsFile);
  dag.getOperatorMeta("Store").getAttributes().put(Context.OperatorContext.COUNTERS_AGGREGATOR, new HDHTWriter.BucketIOStatAggregator());
  dag.addStream("s1", gen.out, hdsOut.input).setLocality(DAG.Locality.THREAD_LOCAL);
}
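A note on the two attribute-setting styles seen on this page: mutating the map returned by dag.getOperatorMeta(name).getAttributes(), as above, is effectively the same as calling dag.setAttribute(operator, key, value); the map form is convenient when only the operator's name is in scope.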
Example 4
private void addLibraryJarsToClasspath(LogicalPlan lp) throws MalformedURLException
{
  String libJarsCsv = lp.getAttributes().get(Context.DAGContext.LIBRARY_JARS);
  if (libJarsCsv != null && libJarsCsv.length() != 0) {
    String[] split = libJarsCsv.split(StramClient.LIB_JARS_SEP);
    if (split.length != 0) {
      URL[] urlList = new URL[split.length];
      for (int i = 0; i < split.length; i++) {
        File file = new File(split[i]);
        urlList[i] = file.toURI().toURL();
      }
      // Set class loader.
      ClassLoader prevCl = Thread.currentThread().getContextClassLoader();
      URLClassLoader cl = URLClassLoader.newInstance(urlList, prevCl);
      Thread.currentThread().setContextClassLoader(cl);
    }
  }
}
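This is the consuming side of the LIBRARY_JARS attribute: the jars listed in it are appended to the thread's context class loader. Example 22 below shows the producing side, where an application sets Context.DAGContext.LIBRARY_JARS on the DAG.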
Example 5
/**
 * Activate the Parser
 */
@Override
public void activate(Context context)
{
  try {
    if (clazz != null) {
      setters = new ArrayList<>();
      List<String> fieldNames = schema.getFieldNames();
      if (fieldNames != null) {
        for (String fieldName : fieldNames) {
          addSetter(fieldName);
        }
      }
    }
  } catch (Exception e) {
    logger.error("Cannot activate Parser Reason {}", e.getMessage());
    throw e;
  }
}
Example 6
/**
 * This creates a {@link ConfElement}.
 *
 * @param element The current {@link StramElement} representing a {@link ConfElement}.
 * @param parent The parent {@link ConfElement}.
 * @param additionalRelatedElements Any additional {@link StramElement} that could be
 *        related to this {@link ConfElement}.
 * @param contextClass The {@link Context} class that contains all the attributes to
 *        be used by this {@link ConfElement}.
 */
ConfElement(StramElement element,
    ConfElement parent,
    Set<StramElement> additionalRelatedElements,
    Class<? extends Context> contextClass)
{
  this.element = element;
  this.parent = parent;
  if (additionalRelatedElements != null) {
    this.allRelatedElements.addAll(additionalRelatedElements);
  }
  this.allRelatedElements.add(element);
  this.contextClass = contextClass;
  this.contextAttributes = contextClass != null ? ContextUtils.CONTEXT_CLASS_TO_ATTRIBUTES.get(contextClass) : new HashSet<String>();
}
Example 7
@Test
public void testParquetEmptyFile() throws Exception
{
  FileContext.getLocalFSFileContext().delete(new Path(new File(testMeta.dir).getAbsolutePath()), true);
  List<EventRecord> data = Lists.newArrayList();
  writeParquetFile(PARQUET_SCHEMA, new File(testMeta.dir, "data.parquet"), data);
  parquetFilePOJOReader.output.setSink(outputSink);
  parquetFilePOJOReader.setDirectory(testMeta.dir);
  parquetFilePOJOReader.setParquetSchema(PARQUET_SCHEMA);
  parquetFilePOJOReader.setup(testMeta.context);
  testMeta.portContext.getAttributes().put(Context.PortContext.TUPLE_CLASS, EventRecordV2.class);
  parquetFilePOJOReader.output.setup(testMeta.portContext);
  for (long wid = 0; wid < 2; wid++) {
    parquetFilePOJOReader.beginWindow(wid);
    parquetFilePOJOReader.emitTuples();
    parquetFilePOJOReader.endWindow();
  }
  Assert.assertEquals("number tuples", 0, outputSink.collectedTuples.size());
  parquetFilePOJOReader.teardown();
}
Example 8
@Override
protected void starting(Description description)
{
  super.starting(description);
  operator = new TransformOperator();
  sink = new CollectorTestSink<>();
  TestUtils.setSink(operator.output, sink);
  operator.setup(null);
  Attribute.AttributeMap inMap = new Attribute.AttributeMap.DefaultAttributeMap();
  inMap.put(Context.PortContext.TUPLE_CLASS, InputClass.class);
  operator.input.setup(new PortContext(inMap, null));
  Attribute.AttributeMap outMap = new Attribute.AttributeMap.DefaultAttributeMap();
  outMap.put(Context.PortContext.TUPLE_CLASS, OutputClass.class);
  operator.output.setup(new PortContext(outMap, null));
}
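Passing null to operator.setup() only works here because this operator does not read anything from its OperatorContext; the ports, by contrast, are given real PortContext instances so that the TUPLE_CLASS attribute is available to them.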
Example 9
@Before
public void setup()
{
  dag = StramTestSupport.createDAG(testMeta);
  dag.setAttribute(Context.DAGContext.STREAMING_WINDOW_SIZE_MILLIS, windowWidthMillis);
  dag.setAttribute(Context.DAGContext.HEARTBEAT_TIMEOUT_MILLIS, heartbeatTimeoutMillis);
  dag.setAttribute(com.datatorrent.api.Context.OperatorContext.STORAGE_AGENT, new StramTestSupport.MemoryStorageAgent());
  GenericTestOperator o1 = dag.addOperator("o1", GenericTestOperator.class);
  GenericTestOperator o2 = dag.addOperator("o2", GenericTestOperator.class);
  GenericTestOperator o3 = dag.addOperator("o3", GenericTestOperator.class);
  dag.addStream("o1.output1", o1.outport1, o3.inport1);
  dag.addStream("o2.output1", o2.outport1, o3.inport2);
  scm = new StreamingContainerManager(dag);
  PhysicalPlan plan = scm.getPhysicalPlan();
  o1p1 = plan.getOperators(dag.getMeta(o1)).get(0);
  o2p1 = plan.getOperators(dag.getMeta(o2)).get(0);
  o3p1 = plan.getOperators(dag.getMeta(o3)).get(0);
}
Example 10
public void setup(PortContext context)
{
  setPojoClass(context.getValue(Context.PortContext.TUPLE_CLASS));
  columnFieldSetters = Lists.newArrayList();
  // Check if the mapping of Generic record fields to POJO is given, else
  // use reflection.
  if (getGenericRecordToPOJOFieldsMapping() == null) {
    setFieldInfos(createFieldInfoMap(generateFieldInfoInputs(getPojoClass())));
  } else {
    setFieldInfos(createFieldInfoMap(getGenericRecordToPOJOFieldsMapping()));
  }
  initColumnFieldSetters(getFieldInfos());
  initializeActiveFieldSetters();
}
Example 11
@Test
@Ignore
public void testMetricsAggregations() throws Exception
{
  CountDownLatch latch = new CountDownLatch(2);
  LogicalPlanConfiguration lpc = new LogicalPlanConfiguration(new Configuration());
  TestGeneratorInputOperator inputOperator = dag.addOperator("input", TestGeneratorInputOperator.class);
  OperatorWithMetrics o1 = dag.addOperator("o1", OperatorWithMetrics.class);
  MockAggregator aggregator = new MockAggregator(latch);
  dag.setOperatorAttribute(o1, Context.OperatorContext.METRICS_AGGREGATOR, aggregator);
  dag.setAttribute(Context.OperatorContext.STORAGE_AGENT, new StramTestSupport.MemoryStorageAgent());
  dag.setOperatorAttribute(o1, Context.OperatorContext.PARTITIONER, new StatelessPartitioner<TestGeneratorInputOperator>(2));
  dag.addStream("TestTuples", inputOperator.outport, o1.inport1);
  lpc.prepareDAG(dag, null, "AutoMetricTest");
  StramLocalCluster lc = new StramLocalCluster(dag);
  lc.runAsync();
  latch.await();
  Assert.assertEquals("progress", 2L, ((Long)aggregator.result.get("progress")).longValue());
  lc.shutdown();
}
Example 12
@Override
public void populateDAG(DAG dag, Configuration conf)
{
  ParquetFilePOJOReader parquetReader = dag.addOperator("parquetReader", getParquetFilePOJOReader());
  ConsoleOutputOperator pojoOp = dag.addOperator("pojoOp", new ConsoleOutputOperator());
  dag.getMeta(parquetReader).getMeta(parquetReader.output).getAttributes().put(Context.PortContext.TUPLE_CLASS,
      EventRecord.class);
  dag.addStream("pojo", parquetReader.output, pojoOp.input);
}
Example 13
@Override
public void setup(Context.OperatorContext context)
{
  if (useUpdatedKeyStorage()) {
    updatedKeyStorage.getStore().setup(context);
    updatedKeyStorage.setup(context);
  }
  super.setup(context);
}
Example 14
@Override
public void setup(Context.OperatorContext context)
{
  // Create the file name for this partition by appending the operator id to
  // the base name.
  long id = context.getId();
  fName = fileName + "_p" + id;
  super.setup(context);
}
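Since context.getId() returns the id of the physical operator, which differs across partitions, each partition of this operator writes to its own file.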
Example 15
/**
 * This test should set the attribute on the operators and ports.
 */
@Test
public void testApplicationLevelAmbiguousAttributeComplex()
{
  testAttributeAmbiguousSimpleHelper(Context.OperatorContext.AUTO_RECORD, Context.PortContext.AUTO_RECORD,
      StreamingApplication.APEX_PREFIX + "application" + LogicalPlanConfiguration.KEY_SEPARATOR + "*" + LogicalPlanConfiguration.KEY_SEPARATOR,
      PortContext.class.getCanonicalName(), Boolean.TRUE, false, true);
}
Example 16
@Override
public void setup(PortContext context)
{
  cls = context.getValue(Context.PortContext.TUPLE_CLASS);
  try {
    parseSchema();
    initializeColumnMap(getSchema());
  } catch (IOException e) {
    LOG.error("Exception in parsing schema", e);
  }
}
Example 17
private void checkSecurityConfiguration(Configuration conf, boolean[][] securityConf)
{
  Assert.assertEquals("Number variations", 5, securityConf.length);
  SecurityUtils.init(conf);
  checkWebSecurity(securityConf[0][0], securityConf[0][1]);
  SecurityUtils.init(conf, Context.StramHTTPAuthentication.ENABLE);
  checkWebSecurity(securityConf[1][0], securityConf[1][1]);
  SecurityUtils.init(conf, Context.StramHTTPAuthentication.DISABLE);
  checkWebSecurity(securityConf[2][0], securityConf[2][1]);
  SecurityUtils.init(conf, Context.StramHTTPAuthentication.FOLLOW_HADOOP_AUTH);
  checkWebSecurity(securityConf[3][0], securityConf[3][1]);
  SecurityUtils.init(conf, Context.StramHTTPAuthentication.FOLLOW_HADOOP_HTTP_AUTH);
  checkWebSecurity(securityConf[4][0], securityConf[4][1]);
}
Example 18
private RelInfo addJoinFilter(Join join, RexNode remaining, RelInfo relInfo, RelContext context)
{
  FilterTransformOperator operator = context.dag
      .addOperator(OperatorUtils.getUniqueOperatorName(join.getRelTypeName() + "_Filter"),
          FilterTransformOperator.class);
  ExpressionCompiler compiler = new ExpressionCompiler(new RexBuilder(join.getCluster().getTypeFactory()));
  String expression = compiler.getExpression(remaining, join.getRowType(), join.getRowType());
  Map<String, String> expMap = new HashMap<>();
  for (Pair<RelDataTypeField, RelDataTypeField> pair : Pair.zip(join.getRowType().getFieldList(),
      join.getRowType().getFieldList())) {
    String leftName = OperatorUtils.getFieldName(pair.left);
    String rightName = OperatorUtils.getFieldName(pair.right);
    expMap.put(leftName, rightName);
  }
  operator.setExpressionMap(expMap);
  operator.setCondition(expression);
  String streamName = OperatorUtils.getUniqueStreamName(join.getRelTypeName() + "_Join",
      join.getRelTypeName() + "_Filter");
  Class schema = TupleSchemaRegistry.getSchemaForRelDataType(context.schemaRegistry, streamName,
      relInfo.getOutRelDataType());
  context.dag.setOutputPortAttribute(relInfo.getOutPort(), Context.PortContext.TUPLE_CLASS, schema);
  context.dag.setInputPortAttribute(operator.input, Context.PortContext.TUPLE_CLASS, schema);
  context.dag.addStream(streamName, relInfo.getOutPort(), operator.input);
  return new RelInfo("Join", relInfo.getInputPorts(), operator, operator.output, join.getRowType());
}
Example 19
@Test
public void testShutdownOperatorTimeout() throws Exception
{
  GenericTestOperator o1 = dag.addOperator("o1", GenericTestOperator.class);
  GenericTestOperator o2 = dag.addOperator("o2", GenericTestOperator.class);
  dag.addStream("s1", o1.outport1, o2.inport1);
  dag.setAttribute(OperatorContext.STORAGE_AGENT, new MemoryStorageAgent());
  dag.setAttribute(Context.DAGContext.STREAMING_WINDOW_SIZE_MILLIS, 50);
  dag.setAttribute(OperatorContext.TIMEOUT_WINDOW_COUNT, 1);
  StreamingContainerManager scm = new StreamingContainerManager(dag);
  PhysicalPlan plan = scm.getPhysicalPlan();
  PTOperator p1 = plan.getOperators(dag.getMeta(o1)).get(0);
  PTOperator p2 = plan.getOperators(dag.getMeta(o2)).get(0);
  shutdownOperator(scm, p1, p2);
  scm.monitorHeartbeat(false);
  Assert.assertTrue(scm.containerStopRequests.isEmpty());
  Thread.sleep(100);
  scm.monitorHeartbeat(false);
  Assert.assertFalse(scm.containerStopRequests.containsKey(p1.getContainer().getExternalId()));
  Assert.assertTrue(scm.containerStopRequests.containsKey(p2.getContainer().getExternalId()));
}
Example 20
@Override
public void populateDAG(DAG dag, Configuration conf)
{
  InputItemGenerator input = dag.addOperator("InputGenerator", InputItemGenerator.class);
  DimensionsComputationFlexibleSingleSchemaPOJO dimensions = dag.addOperator("DimensionsComputation",
      DimensionsComputationFlexibleSingleSchemaPOJO.class);
  dag.getMeta(dimensions).getAttributes().put(Context.OperatorContext.APPLICATION_WINDOW_COUNT, 10);
  DevNull<Object> devNull = dag.addOperator("DevNull", new DevNull<Object>());
  // Set input properties.
  String eventSchema = SchemaUtils.jarResourceFileToString("adsBenchmarkSchema.json");
  input.setEventSchemaJSON(eventSchema);
  Map<String, String> keyToExpression = Maps.newHashMap();
  keyToExpression.put("publisher", "getPublisher()");
  keyToExpression.put("advertiser", "getAdvertiser()");
  keyToExpression.put("location", "getLocation()");
  keyToExpression.put("time", "getTime()");
  Map<String, String> aggregateToExpression = Maps.newHashMap();
  aggregateToExpression.put("cost", "getCost()");
  aggregateToExpression.put("revenue", "getRevenue()");
  aggregateToExpression.put("impressions", "getImpressions()");
  aggregateToExpression.put("clicks", "getClicks()");
  DimensionsComputationUnifierImpl<InputEvent, Aggregate> unifier = new DimensionsComputationUnifierImpl<InputEvent, Aggregate>();
  dimensions.setUnifier(unifier);
  dimensions.setKeyToExpression(keyToExpression);
  dimensions.setAggregateToExpression(aggregateToExpression);
  dimensions.setConfigurationSchemaJSON(eventSchema);
  dag.addStream("InputStream", input.outputPort, dimensions.input).setLocality(Locality.CONTAINER_LOCAL);
  dag.addStream("DimensionalData", dimensions.output, devNull.data);
}
Example 21
@Override
public void setup(Context.OperatorContext context)
{
  AggregatorRegistry.DEFAULT_AGGREGATOR_REGISTRY.setup();
  schema = new DimensionalConfigurationSchema(eventSchema, AggregatorRegistry.DEFAULT_AGGREGATOR_REGISTRY);
  super.setup(context);
}
Example 22
@Override
public void populateDAG(DAG dag, Configuration conf)
{
  TestGeneratorInputOperator input = dag.addOperator("Input", new TestGeneratorInputOperator());
  test = dag.addOperator("Test", new DynamicLoader());
  dag.addStream("S1", input.outport, test.input);
  dag.setAttribute(Context.DAGContext.LIBRARY_JARS, generatedJar);
  dag.setInputPortAttribute(test.input, Context.PortContext.TUPLE_CLASS, pojo);
}
Example 23
@Override
public void populateDAG(DAG dag, Configuration conf)
{
  // Set up the operator to get the data from the twitter sample stream injected into the system.
  TwitterSampleInput twitterFeed = new TwitterSampleInput();
  twitterFeed = dag.addOperator("TweetSampler", twitterFeed);
  // Set up the operator to get the URLs extracted from the twitter statuses.
  TwitterStatusURLExtractor urlExtractor = dag.addOperator("URLExtractor", TwitterStatusURLExtractor.class);
  // Set up a node to count the unique urls within a window.
  UniqueCounter<String> uniqueCounter = dag.addOperator("UniqueURLCounter", new UniqueCounter<String>());
  // Get the aggregated url counts and count them over the last 5 minutes.
  dag.setAttribute(uniqueCounter, Context.OperatorContext.APPLICATION_WINDOW_COUNT, 600);
  dag.setAttribute(uniqueCounter, Context.OperatorContext.SLIDE_BY_WINDOW_COUNT, 1);
  WindowedTopCounter<String> topCounts = dag.addOperator("TopCounter", new WindowedTopCounter<String>());
  topCounts.setTopCount(10);
  topCounts.setSlidingWindowWidth(1);
  topCounts.setDagWindowWidth(1);
  // Feed the statuses from the feed into the input of the url extractor.
  dag.addStream("TweetStream", twitterFeed.status, urlExtractor.input).setLocality(Locality.CONTAINER_LOCAL);
  // Start counting the urls coming out of the URL extractor.
  dag.addStream("TwittedURLs", urlExtractor.url, uniqueCounter.data).setLocality(locality);
  // Count unique urls.
  dag.addStream("UniqueURLCounts", uniqueCounter.count, topCounts.input);
  consoleOutput(dag, "topURLs", topCounts.output, SNAPSHOT_SCHEMA, "url");
}
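With Apex's default streaming window of 500 ms, the APPLICATION_WINDOW_COUNT of 600 makes the unique counter aggregate over 600 × 500 ms = 5 minutes, and SLIDE_BY_WINDOW_COUNT of 1 slides that 5-minute window forward one streaming window at a time.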
Example 24
@Test
public void testMxNMultipleStreamCodecs()
{
  GenericTestOperator node1 = dag.addOperator("node1", GenericTestOperator.class);
  dag.setOperatorAttribute(node1, Context.OperatorContext.PARTITIONER, new StatelessPartitioner<GenericTestOperator>(2));
  GenericTestOperator node2 = dag.addOperator("node2", GenericTestOperator.class);
  dag.setOperatorAttribute(node2, Context.OperatorContext.PARTITIONER, new StatelessPartitioner<GenericTestOperator>(3));
  TestStreamCodec serDe = new TestStreamCodec();
  dag.setInputPortAttribute(node2.inport1, Context.PortContext.STREAM_CODEC, serDe);
  GenericTestOperator node3 = dag.addOperator("node3", GenericTestOperator.class);
  dag.setOperatorAttribute(node3, Context.OperatorContext.PARTITIONER, new StatelessPartitioner<GenericTestOperator>(3));
  TestStreamCodec serDe2 = new TestStreamCodec();
  dag.setInputPortAttribute(node3.inport1, Context.PortContext.STREAM_CODEC, serDe2);
  dag.addStream("n1n2n3", node1.outport1, node2.inport1, node3.inport1);
  dag.setAttribute(LogicalPlan.CONTAINERS_MAX_COUNT, Integer.MAX_VALUE);
  StramTestSupport.MemoryStorageAgent msa = new StramTestSupport.MemoryStorageAgent();
  dag.setAttribute(Context.OperatorContext.STORAGE_AGENT, msa);
  StreamingContainerManager dnm = new StreamingContainerManager(dag);
  PhysicalPlan plan = dnm.getPhysicalPlan();
  List<PTContainer> containers = plan.getContainers();
  for (int i = 0; i < containers.size(); ++i) {
    StreamingContainerManagerTest.assignContainer(dnm, "container" + (i + 1));
  }
  LogicalPlan.OperatorMeta n1meta = dag.getMeta(node1);
  LogicalPlan.OperatorMeta n2meta = dag.getMeta(node2);
  LogicalPlan.OperatorMeta n3meta = dag.getMeta(node3);
  // Sanity check that physical operators have been allocated for n1meta and n2meta.
  Assert.assertEquals("number operators " + n1meta.getName(), 2, plan.getOperators(n1meta).size());
  Assert.assertEquals("number operators " + n2meta.getName(), 3, plan.getOperators(n2meta).size());
  Assert.assertEquals("number operators " + n3meta.getName(), 3, plan.getOperators(n3meta).size());
  checkMxNStreamCodecs(node1, node2, node3, dnm);
}
Example 25
@Test
public void testAttributesCodec()
{
  Assert.assertNotSame(null, new Long[] {com.datatorrent.api.Context.DAGContext.serialVersionUID,
      OperatorContext.serialVersionUID, PortContext.serialVersionUID});
  @SuppressWarnings("unchecked")
  Set<Class<? extends Context>> contextClasses = Sets.newHashSet(com.datatorrent.api.Context.DAGContext.class,
      OperatorContext.class, PortContext.class);
  for (Class<?> c : contextClasses) {
    for (Attribute<Object> attr : AttributeInitializer.getAttributes(c)) {
      Assert.assertNotNull(attr.name + " codec", attr.codec);
    }
  }
}
Example 26
@Override
public void activate(Context context)
{
  super.activate(context);
  if (condition != null) {
    conditionExpression = PojoUtils.createExpression(inputClass, condition, Boolean.class,
        expressionFunctions.toArray(new String[expressionFunctions.size()]));
  }
}
Example 27
private void deployNodes(List<OperatorDeployInfo> nodeList) throws IOException
{
  for (OperatorDeployInfo ndi : nodeList) {
    StorageAgent backupAgent = getValue(OperatorContext.STORAGE_AGENT, ndi);
    assert (backupAgent != null);
    Context parentContext;
    if (ndi instanceof UnifierDeployInfo) {
      OperatorContext unifiedOperatorContext = new OperatorContext(0, ndi.name,
          ((UnifierDeployInfo)ndi).operatorAttributes, containerContext);
      parentContext = new PortContext(ndi.inputs.get(0).contextAttributes, unifiedOperatorContext);
      massageUnifierDeployInfo(ndi);
    } else {
      parentContext = containerContext;
    }
    OperatorContext ctx = new OperatorContext(ndi.id, ndi.name, ndi.contextAttributes, parentContext);
    ctx.attributes.put(OperatorContext.ACTIVATION_WINDOW_ID, ndi.checkpoint.windowId);
    Node<?> node = reuseOpNodes.get(ndi.id);
    if (node == null) {
      logger.info("Restoring operator {} to checkpoint {} stateless={}.", ndi.id, Codec.getStringWindowId(ndi.checkpoint.windowId), ctx.stateless);
      node = Node.retrieveNode(backupAgent.load(ndi.id, ctx.stateless ? Stateless.WINDOW_ID : ndi.checkpoint.windowId), ctx, ndi.type);
    } else {
      logger.info("Reusing previous operator instance {}", ndi.id);
      node = Node.retrieveNode(node.operator, ctx, ndi.type);
      node.setReuseOperator(true);
      reuseOpNodes.remove(ndi.id);
    }
    node.currentWindowId = ndi.checkpoint.windowId;
    node.applicationWindowCount = ndi.checkpoint.applicationWindowCount;
    node.firstWindowMillis = firstWindowMillis;
    node.windowWidthMillis = windowWidthMillis;
    node.setId(ndi.id);
    nodes.put(ndi.id, node);
    logger.debug("Marking operator {} as deployed.", node);
  }
}
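Note how the contexts nest for unifiers: a synthetic OperatorContext carrying the unifier attributes is wrapped in a PortContext, which then becomes the parent of the deployed operator's context, so an attribute lookup that misses at the operator level falls back through the port and unifier attributes to the container context.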
Example 28
@Override
public void setup(Context.OperatorContext context)
{
  timeIncrement = context.getValue(Context.OperatorContext.APPLICATION_WINDOW_COUNT) *
      context.getValue(Context.DAGContext.STREAMING_WINDOW_SIZE_MILLIS);
  super.setup(context);
  for (int i = 0; i < 2; i++) {
    inputFieldObjects[i] = new FieldObjectMap();
  }
}
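timeIncrement is thus the length of one application window in milliseconds: the number of streaming windows per application window multiplied by the width of a single streaming window.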
Example 29
@Override
public void populateDAG(DAG dag, Configuration conf)
{
  Generator randomGenerator = dag.addOperator("randomGenerator", Generator.class);
  DefaultProcessor processor = dag.addOperator("process", DefaultProcessor.class);
  ControlAwareReceiver receiver = dag.addOperator("receiver", ControlAwareReceiver.class);
  dag.addStream("genToProcessor", randomGenerator.out, processor.input);
  dag.addStream("ProcessorToReceiver", processor.output, receiver.input);
  dag.setOperatorAttribute(processor, Context.OperatorContext.PARTITIONER, new StatelessPartitioner<>(2));
}
Example 30
@Override
public void setup(Context.OperatorContext context)
{
  super.setup(context);
  startingTime = System.currentTimeMillis();
  watermarkTime = System.currentTimeMillis() + 10000;
  i = 1;
}