This article collects typical usage examples of the Java class com.datatorrent.api.Context.PortContext. If you are wondering what the PortContext class is for, how to use it, or are looking for concrete examples, the curated code samples below may help.
The PortContext class belongs to the com.datatorrent.api.Context package. Twenty code examples of the PortContext class are shown below, sorted by popularity by default.
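Before the individual examples, the following is a minimal, self-contained sketch (not taken from any of the projects below) of where PortContext attributes are typically set while assembling a DAG. The class and operator names (PortContextSketchApp, NumberSource, Discard) are illustrative only, and the sketch assumes an Apache Apex release where BaseOperator lives in com.datatorrent.common.util; the attribute calls mirror the patterns used throughout the examples that follow.

import org.apache.hadoop.conf.Configuration;

import com.datatorrent.api.Context.PortContext;
import com.datatorrent.api.DAG;
import com.datatorrent.api.DefaultInputPort;
import com.datatorrent.api.DefaultOutputPort;
import com.datatorrent.api.InputOperator;
import com.datatorrent.api.StreamingApplication;
import com.datatorrent.common.util.BaseOperator;

public class PortContextSketchApp implements StreamingApplication
{
  // A trivial source that emits one integer per call to emitTuples().
  public static class NumberSource extends BaseOperator implements InputOperator
  {
    public final transient DefaultOutputPort<Integer> output = new DefaultOutputPort<>();

    @Override
    public void emitTuples()
    {
      output.emit(1);
    }
  }

  // A trivial sink that discards every tuple it receives.
  public static class Discard extends BaseOperator
  {
    public final transient DefaultInputPort<Integer> input = new DefaultInputPort<Integer>()
    {
      @Override
      public void process(Integer tuple)
      {
        // drop the tuple
      }
    };
  }

  @Override
  public void populateDAG(DAG dag, Configuration conf)
  {
    NumberSource source = dag.addOperator("source", new NumberSource());
    Discard sink = dag.addOperator("sink", new Discard());

    // Enlarge the buffer on the source's output port to 16k tuples.
    dag.setOutputPortAttribute(source.output, PortContext.QUEUE_CAPACITY, 16 * 1024);

    // Let the sink's input port inherit the upstream partitioning, if any.
    dag.setInputPortAttribute(sink.input, PortContext.PARTITION_PARALLEL, true);

    dag.addStream("numbers", source.output, sink.input);
  }
}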
Example 1: testNumberOfUnifiers
import com.datatorrent.api.Context.PortContext; // import the required package/class
@Test
public void testNumberOfUnifiers()
{
  LogicalPlan dag = new LogicalPlan();
  dag.setAttribute(OperatorContext.STORAGE_AGENT, new StramTestSupport.MemoryStorageAgent());
  GenericTestOperator node1 = dag.addOperator("node1", GenericTestOperator.class);
  GenericTestOperator node2 = dag.addOperator("node2", GenericTestOperator.class);
  dag.addStream("node1.outport1", node1.outport1, node2.inport1);
  dag.setOperatorAttribute(node1, OperatorContext.PARTITIONER, new StatelessPartitioner<GenericTestOperator>(5));
  dag.setOutputPortAttribute(node1.outport1, PortContext.UNIFIER_LIMIT, 3);
  PhysicalPlan plan = new PhysicalPlan(dag, new TestPlanContext());
  List<PTContainer> containers = plan.getContainers();
  int unifierCount = 0;
  int totalOperators = 0;
  for (PTContainer container : containers) {
    List<PTOperator> operators = container.getOperators();
    for (PTOperator operator : operators) {
      totalOperators++;
      if (operator.isUnifier()) {
        unifierCount++;
      }
    }
  }
  Assert.assertEquals("Number of operators", 8, totalOperators);
  Assert.assertEquals("Number of unifiers", 2, unifierCount);
}
Author: apache | Project: apex-core | Lines: 27 | Source: PhysicalPlanTest.java
Example 2: testNumberOfUnifiersWithEvenPartitions
import com.datatorrent.api.Context.PortContext; // import the required package/class
@Test
public void testNumberOfUnifiersWithEvenPartitions()
{
  LogicalPlan dag = new LogicalPlan();
  dag.setAttribute(OperatorContext.STORAGE_AGENT, new StramTestSupport.MemoryStorageAgent());
  GenericTestOperator node1 = dag.addOperator("node1", GenericTestOperator.class);
  GenericTestOperator node2 = dag.addOperator("node2", GenericTestOperator.class);
  dag.addStream("node1.outport1", node1.outport1, node2.inport1);
  dag.setOperatorAttribute(node1, OperatorContext.PARTITIONER, new StatelessPartitioner<GenericTestOperator>(8));
  dag.setOutputPortAttribute(node1.outport1, PortContext.UNIFIER_LIMIT, 4);
  PhysicalPlan plan = new PhysicalPlan(dag, new TestPlanContext());
  List<PTContainer> containers = plan.getContainers();
  int unifierCount = 0;
  int totalOperators = 0;
  for (PTContainer container : containers) {
    List<PTOperator> operators = container.getOperators();
    for (PTOperator operator : operators) {
      totalOperators++;
      if (operator.isUnifier()) {
        unifierCount++;
      }
    }
  }
  Assert.assertEquals("Number of operators", 12, totalOperators);
  Assert.assertEquals("Number of unifiers", 3, unifierCount);
}
Author: apache | Project: apex-core | Lines: 27 | Source: PhysicalPlanTest.java
Example 3: testParallelPartitionForSlidingWindow
import com.datatorrent.api.Context.PortContext; // import the required package/class
@Test
public void testParallelPartitionForSlidingWindow()
{
  LogicalPlan dag = new LogicalPlan();
  dag.setAttribute(OperatorContext.STORAGE_AGENT, new StramTestSupport.MemoryStorageAgent());
  GenericTestOperator o1 = dag.addOperator("o1", GenericTestOperator.class);
  GenericTestOperator o2 = dag.addOperator("o2", GenericTestOperator.class);
  GenericTestOperator o3 = dag.addOperator("o3", GenericTestOperator.class);
  dag.setOperatorAttribute(o1, OperatorContext.SLIDE_BY_WINDOW_COUNT, 2);
  dag.setOperatorAttribute(o1, OperatorContext.PARTITIONER, new StatelessPartitioner<>(2));
  dag.setInputPortAttribute(o2.inport1, PortContext.PARTITION_PARALLEL, true);
  dag.setOperatorAttribute(o1, OperatorContext.APPLICATION_WINDOW_COUNT, 4);
  dag.addStream("o1.outport1", o1.outport1, o2.inport1);
  dag.addStream("o2.outport1", o2.outport1, o3.inport1);
  PhysicalPlan plan = new PhysicalPlan(dag, new TestPlanContext());
  Assert.assertEquals("number of containers", 7, plan.getContainers().size());
}
Author: apache | Project: apex-core | Lines: 20 | Source: PhysicalPlanTest.java
Example 4: populateDAG
import com.datatorrent.api.Context.PortContext; // import the required package/class
@Override
public void populateDAG(DAG dag, Configuration conf)
{
  // RandomEventGenerator rand = dag.addOperator("rand", new RandomEventGenerator());
  // rand.setMinvalue(0);
  // rand.setMaxvalue(999999);
  // rand.setTuplesBlastIntervalMillis(50);
  // dag.getMeta(rand).getMeta(rand.integer_data).getAttributes().put(PortContext.QUEUE_CAPACITY, QUEUE_CAPACITY);
  IntegerOperator intInput = dag.addOperator("intInput", new IntegerOperator());
  StreamDuplicater stream = dag.addOperator("stream", new StreamDuplicater());
  dag.getMeta(stream).getMeta(stream.data).getAttributes().put(PortContext.QUEUE_CAPACITY, QUEUE_CAPACITY);
  dag.addStream("streamdup1", intInput.integer_data, stream.data).setLocality(locality);
  DevNull<Integer> dev1 = dag.addOperator("dev1", new DevNull());
  DevNull<Integer> dev2 = dag.addOperator("dev2", new DevNull());
  dag.addStream("streamdup2", stream.out1, dev1.data).setLocality(locality);
  dag.addStream("streamdup3", stream.out2, dev2.data).setLocality(locality);
}
Author: apache | Project: apex-malhar | Lines: 19 | Source: StreamDuplicaterApp.java
Example 5: populateDAG
import com.datatorrent.api.Context.PortContext; // import the required package/class
@Override
public void populateDAG(DAG dag, Configuration conf)
{
  RandomEventGenerator rand = dag.addOperator("rand", new RandomEventGenerator());
  rand.setMaxvalue(3000);
  rand.setTuplesBlast(120);
  RandomMapOutput randMap = dag.addOperator("randMap", new RandomMapOutput());
  randMap.setKey("val");
  RubyOperator ruby = dag.addOperator("ruby", new RubyOperator());
  String setupScript = "def square(val)\n";
  setupScript += " return val*val\nend\n";
  ruby.addSetupScript(setupScript);
  ruby.setInvoke("square");
  ruby.setPassThru(true);
  ConsoleOutputOperator console = dag.addOperator("console", new ConsoleOutputOperator());
  dag.getMeta(console).getMeta(console.input).getAttributes().put(PortContext.QUEUE_CAPACITY, QUEUE_CAPACITY);
  dag.getMeta(ruby).getMeta(ruby.result).getAttributes().put(PortContext.QUEUE_CAPACITY, QUEUE_CAPACITY);
  dag.addStream("rand_randMap", rand.integer_data, randMap.input).setLocality(Locality.THREAD_LOCAL);
  dag.addStream("randMap_ruby", randMap.map_data, ruby.inBindings).setLocality(locality);
  dag.addStream("ruby_console", ruby.result, console.input).setLocality(locality);
}
Author: apache | Project: apex-malhar | Lines: 26 | Source: RubyOperatorBenchmarkApplication.java
Example 6: populateDAG
import com.datatorrent.api.Context.PortContext; // import the required package/class
@Override
public void populateDAG(DAG dag, Configuration conf)
{
  String filePath = "HDFSOutputOperatorBenchmarkingApp/"
      + System.currentTimeMillis();
  dag.setAttribute(DAG.STREAMING_WINDOW_SIZE_MILLIS, 1000);
  RandomWordGenerator wordGenerator = dag.addOperator("wordGenerator", RandomWordGenerator.class);
  dag.getOperatorMeta("wordGenerator").getMeta(wordGenerator.output)
      .getAttributes().put(PortContext.QUEUE_CAPACITY, 10000);
  dag.getOperatorMeta("wordGenerator").getAttributes()
      .put(OperatorContext.APPLICATION_WINDOW_COUNT, 1);
  FSByteOutputOperator hdfsOutputOperator = dag.addOperator("hdfsOutputOperator", new FSByteOutputOperator());
  hdfsOutputOperator.setFilePath(filePath);
  dag.getOperatorMeta("hdfsOutputOperator").getAttributes()
      .put(OperatorContext.COUNTERS_AGGREGATOR, new BasicCounters.LongAggregator<MutableLong>());
  dag.addStream("Generator2HDFSOutput", wordGenerator.output, hdfsOutputOperator.input);
}
Author: apache | Project: apex-malhar | Lines: 23 | Source: FSOutputOperatorBenchmark.java
Example 7: populateDAG
import com.datatorrent.api.Context.PortContext; // import the required package/class
@Override
public void populateDAG(DAG dag, Configuration conf)
{
  WordCountOperator<HashMap<String, Double>> counterString =
      dag.addOperator("counterString", new WordCountOperator<HashMap<String, Double>>());
  dag.getMeta(counterString).getMeta(counterString.input).getAttributes()
      .put(PortContext.QUEUE_CAPACITY, QUEUE_CAPACITY);
  EventClassifierNumberToHashDouble eventClassify =
      dag.addOperator("eventClassify", new EventClassifierNumberToHashDouble());
  dag.getMeta(eventClassify).getMeta(eventClassify.data)
      .getAttributes().put(PortContext.QUEUE_CAPACITY, QUEUE_CAPACITY);
  IntegerOperator intInput = dag.addOperator("intInput", new IntegerOperator());
  dag.addStream("eventclassifier2", intInput.integer_data, eventClassify.event).setLocality(locality);
  dag.addStream("eventclassifier1", eventClassify.data, counterString.input).setLocality(locality);
}
Author: apache | Project: apex-malhar | Lines: 17 | Source: EventClassifierNumberToHashDoubleApp.java
Example 8: populateDAG
import com.datatorrent.api.Context.PortContext; // import the required package/class
@Override
public void populateDAG(DAG dag, Configuration conf)
{
  EventGenerator eventGenerator = dag.addOperator("eventGenerator", new EventGenerator());
  dag.getMeta(eventGenerator).getMeta(eventGenerator.count).getAttributes()
      .put(PortContext.QUEUE_CAPACITY, QUEUE_CAPACITY);
  DevNull<String> devString = dag.addOperator("devString", new DevNull());
  DevNull<HashMap<String, Double>> devMap = dag.addOperator("devMap", new DevNull());
  DevNull<HashMap<String, Number>> devInt = dag.addOperator("devInt", new DevNull());
  dag.addStream("EventGenString", eventGenerator.string_data, devString.data).setLocality(locality);
  dag.addStream("EventGenMap", eventGenerator.hash_data, devMap.data).setLocality(locality);
  dag.addStream("EventGenInt", eventGenerator.count, devInt.data).setLocality(locality);
}
Author: apache | Project: apex-malhar | Lines: 17 | Source: EventGeneratorApp.java
Example 9: populateDAG
import com.datatorrent.api.Context.PortContext; // import the required package/class
@Override
public void populateDAG(DAG dag, Configuration conf)
{
  JsonGenerator generator = dag.addOperator("JsonGenerator", JsonGenerator.class);
  JsonParser jsonParser = dag.addOperator("jsonParser", JsonParser.class);
  CsvFormatter formatter = dag.addOperator("formatter", CsvFormatter.class);
  formatter.setSchema(SchemaUtils.jarResourceFileToString(filename));
  dag.setInputPortAttribute(formatter.in, PortContext.TUPLE_CLASS, PojoEvent.class);
  HDFSOutputOperator<String> hdfsOutput = dag.addOperator("HDFSOutputOperator", HDFSOutputOperator.class);
  hdfsOutput.setLineDelimiter("");
  dag.addStream("parserStream", generator.out, jsonParser.in);
  dag.addStream("formatterStream", jsonParser.out, formatter.in);
  dag.addStream("outputStream", formatter.out, hdfsOutput.input);
}
Author: apache | Project: apex-malhar | Lines: 19 | Source: Application.java
Example 10: populateDAG
import com.datatorrent.api.Context.PortContext; // import the required package/class
@Override
public void populateDAG(DAG dag, Configuration conf)
{
  JdbcPOJOPollInputOperator poller = dag.addOperator("JdbcPoller", new JdbcPOJOPollInputOperator());
  JdbcStore store = new JdbcStore();
  poller.setStore(store);
  poller.setFieldInfos(addFieldInfos());
  FileLineOutputOperator writer = dag.addOperator("Writer", new FileLineOutputOperator());
  dag.setInputPortAttribute(writer.input, PortContext.PARTITION_PARALLEL, true);
  writer.setRotationWindows(60);
  dag.addStream("dbrecords", poller.outputPort, writer.input);
}
Author: apache | Project: apex-malhar | Lines: 17 | Source: JdbcPollerApplication.java
Example 11: setup
import com.datatorrent.api.Context.PortContext; // import the required package/class
public void setup(PortContext context)
{
  jsonParser = new JSONParser();
  finder = new JsonKeyFinder();
  columnFields = new ArrayList<String>();
  columnFieldSetters = Lists.newArrayList();
  setPojoClass(context.getValue(Context.PortContext.TUPLE_CLASS));
  if (getFieldMappingString() == null) {
    setFieldInfos(createFieldInfoMap(generateFieldInfoInputs(getPojoClass())));
  } else {
    setFieldInfos(createFieldInfoMap(getFieldMappingString()));
  }
  initColumnFieldSetters(getFieldInfos());
  initializeActiveFieldSetters();
  ListIterator<FieldInfo> itr = fieldInfos.listIterator();
  while (itr.hasNext()) {
    columnFields.add(itr.next().getColumnName());
  }
  finder.setMatchKeyList(columnFields);
}
Author: apache | Project: apex-malhar | Lines: 24 | Source: StreamingJsonParser.java
Example 12: setup
import com.datatorrent.api.Context.PortContext; // import the required package/class
public void setup(PortContext context)
{
  setPojoClass(context.getValue(Context.PortContext.TUPLE_CLASS));
  columnFieldSetters = Lists.newArrayList();
  /**
   * Check if the mapping of Generic record fields to POJO is given, else
   * use reflection
   */
  if (getGenericRecordToPOJOFieldsMapping() == null) {
    setFieldInfos(createFieldInfoMap(generateFieldInfoInputs(getPojoClass())));
  } else {
    setFieldInfos(createFieldInfoMap(getGenericRecordToPOJOFieldsMapping()));
  }
  initColumnFieldSetters(getFieldInfos());
  initializeActiveFieldSetters();
}
Author: apache | Project: apex-malhar | Lines: 20 | Source: AvroToPojo.java
Example 13: populateDAG
import com.datatorrent.api.Context.PortContext; // import the required package/class
@Override
public void populateDAG(DAG dag, Configuration conf)
{
  BlockWriter blockWriter = dag.addOperator("BlockWriter", new BlockWriter());
  Synchronizer synchronizer = dag.addOperator("BlockSynchronizer", new Synchronizer());
  dag.setInputPortAttribute(blockWriter.input, PortContext.PARTITION_PARALLEL, true);
  dag.setInputPortAttribute(blockWriter.blockMetadataInput, PortContext.PARTITION_PARALLEL, true);
  dag.addStream("CompletedBlockmetadata", blockWriter.blockMetadataOutput, synchronizer.blocksMetadataInput);
  HDFSFileMerger merger = new HDFSFileMerger();
  merger = dag.addOperator("FileMerger", merger);
  dag.addStream("MergeTrigger", synchronizer.trigger, merger.input);
  merger.setFilePath(outputDirectoryPath);
  merger.setOverwriteOnConflict(overwriteOnConflict);
  blockWriter.setBlocksDirectory(blocksDirectory);
  merger.setBlocksDirectory(blocksDirectory);
  filesMetadataInput.set(synchronizer.filesMetadataInput);
  blocksMetadataInput.set(blockWriter.blockMetadataInput);
  blockData.set(blockWriter.input);
}
Author: apache | Project: apex-malhar | Lines: 25 | Source: HDFSFileCopyModule.java
Example 14: populateDAG
import com.datatorrent.api.Context.PortContext; // import the required package/class
@Override
public void populateDAG(DAG dag, Configuration conf)
{
  TestStatsListener sl = new TestStatsListener();
  sl.adjustRate = conf.getBoolean("dt.hdsbench.adjustRate", false);
  TestGenerator gen = dag.addOperator("Generator", new TestGenerator());
  dag.setAttribute(gen, OperatorContext.STATS_LISTENERS, Lists.newArrayList((StatsListener)sl));
  TestStoreOperator store = dag.addOperator("Store", new TestStoreOperator());
  dag.setAttribute(store, OperatorContext.STATS_LISTENERS, Lists.newArrayList((StatsListener)sl));
  FileAccessFSImpl hfa = new HFileImpl();
  hfa.setBasePath(this.getClass().getSimpleName());
  store.setFileStore(hfa);
  dag.setInputPortAttribute(store.input, PortContext.PARTITION_PARALLEL, true);
  dag.getOperatorMeta("Store").getAttributes().put(Context.OperatorContext.COUNTERS_AGGREGATOR,
      new HDHTWriter.BucketIOStatAggregator());
  dag.addStream("Events", gen.data, store.input).setLocality(Locality.THREAD_LOCAL);
}
Author: DataTorrent | Project: Megh | Lines: 18 | Source: HDHTBenchmarkTest.java
Example 15: setup
import com.datatorrent.api.Context.PortContext; // import the required package/class
/** {@inheritDoc} */
@Override
public void setup(PortContext context)
{
  if (Boolean.getBoolean(THREAD_AFFINITY_DISABLE_CHECK) == false) {
    operatorThread = Thread.currentThread();
    logger.debug("Enforcing emit on {}", operatorThread.getName());
  }
}
Author: apache | Project: apex-core | Lines: 10 | Source: DefaultOutputPort.java
Example 16: setup
import com.datatorrent.api.Context.PortContext; // import the required package/class
@Override
public void setup(PortContext context)
{
  if (inputPort != null) {
    inputPort.setup(context);
  }
}
Author: apache | Project: apex-core | Lines: 8 | Source: Module.java
Example 17: testParallelPartition
import com.datatorrent.api.Context.PortContext; // import the required package/class
@Test
public void testParallelPartition() throws Exception
{
  TestInputOperator i1 = new TestInputOperator();
  dag.addOperator("i1", i1);
  dag.setOperatorAttribute(i1, OperatorContext.PARTITIONER, new StatelessPartitioner<GenericTestOperator>(2));
  GenericTestOperator op1 = new GenericTestOperator();
  dag.addOperator("op1", op1);
  dag.setInputPortAttribute(op1.inport1, PortContext.PARTITION_PARALLEL, true);
  TestOutputOperator op2 = new TestOutputOperator();
  dag.addOperator("op2", op2);
  dag.addStream("s1", i1.output, op1.inport1);
  dag.addStream("s2", op1.outport1, op2.inport);
  StreamingContainerManager scm = new StreamingContainerManager(dag);
  PhysicalPlan physicalPlan = scm.getPhysicalPlan();
  List<PTContainer> containers = physicalPlan.getContainers();
  Assert.assertEquals("Number of containers", 5, containers.size());
  assignContainers(scm, containers);
  testOutputAttribute(dag, i1, scm, physicalPlan, false);
  testOutputAttribute(dag, op1, scm, physicalPlan, true);
}
Author: apache | Project: apex-core | Lines: 30 | Source: OutputUnifiedTest.java
Example 18: testOutputAttribute
import com.datatorrent.api.Context.PortContext; // import the required package/class
private void testOutputAttribute(LogicalPlan dag, Operator operator, StreamingContainerManager scm, PhysicalPlan physicalPlan, boolean result)
{
  List<PTOperator> ptOperators = physicalPlan.getOperators(dag.getMeta(operator));
  for (PTOperator ptOperator : ptOperators) {
    PTContainer container = ptOperator.getContainer();
    StreamingContainerAgent agent = scm.getContainerAgent("container" + container.getId());
    List<OperatorDeployInfo> deployInfoList = agent.getDeployInfoList(container.getOperators());
    Assert.assertEquals("Deploy info size", 1, deployInfoList.size());
    Assert.assertEquals("Is output unified", deployInfoList.get(0).outputs.get(0).getAttributes().get(PortContext.IS_OUTPUT_UNIFIED), result);
  }
}
Author: apache | Project: apex-core | Lines: 12 | Source: OutputUnifiedTest.java
Example 19: testPortLevelAttributes
import com.datatorrent.api.Context.PortContext; // import the required package/class
@Test
@SuppressWarnings("UnnecessaryBoxing")
public void testPortLevelAttributes()
{
  String appName = "app1";
  SimpleTestApplication app = new SimpleTestApplication();
  Properties props = new Properties();
  props.put(StreamingApplication.APEX_PREFIX + "application." + appName + ".class", app.getClass().getName());
  props.put(StreamingApplication.APEX_PREFIX + "application." + appName + ".operator.operator1.port.*." + PortContext.QUEUE_CAPACITY.getName(), "" + 16 * 1024);
  props.put(StreamingApplication.APEX_PREFIX + "application." + appName + ".operator.operator2.inputport.inport1." + PortContext.QUEUE_CAPACITY.getName(), "" + 32 * 1024);
  props.put(StreamingApplication.APEX_PREFIX + "application." + appName + ".operator.operator2.outputport.outport1." + PortContext.QUEUE_CAPACITY.getName(), "" + 32 * 1024);
  props.put(StreamingApplication.APEX_PREFIX + "application." + appName + ".operator.operator3.port.*." + PortContext.QUEUE_CAPACITY.getName(), "" + 16 * 1024);
  props.put(StreamingApplication.APEX_PREFIX + "application." + appName + ".operator.operator3.inputport.inport2." + PortContext.QUEUE_CAPACITY.getName(), "" + 32 * 1024);
  LogicalPlanConfiguration dagBuilder = new LogicalPlanConfiguration(new Configuration(false));
  dagBuilder.addFromProperties(props, null);
  String appPath = app.getClass().getName().replace(".", "/") + ".class";
  LogicalPlan dag = new LogicalPlan();
  dagBuilder.prepareDAG(dag, app, appPath);
  OperatorMeta om1 = dag.getOperatorMeta("operator1");
  Assert.assertEquals("", Integer.valueOf(16 * 1024), om1.getMeta(app.gt1.outport1).getValue(PortContext.QUEUE_CAPACITY));
  OperatorMeta om2 = dag.getOperatorMeta("operator2");
  Assert.assertEquals("", Integer.valueOf(32 * 1024), om2.getMeta(app.gt2.inport1).getValue(PortContext.QUEUE_CAPACITY));
  Assert.assertEquals("", Integer.valueOf(32 * 1024), om2.getMeta(app.gt2.outport1).getValue(PortContext.QUEUE_CAPACITY));
  OperatorMeta om3 = dag.getOperatorMeta("operator3");
  Assert.assertEquals("", Integer.valueOf(16 * 1024), om3.getMeta(app.gt3.inport1).getValue(PortContext.QUEUE_CAPACITY));
  Assert.assertEquals("", Integer.valueOf(32 * 1024), om3.getMeta(app.gt3.inport2).getValue(PortContext.QUEUE_CAPACITY));
}
Author: apache | Project: apex-core | Lines: 33 | Source: LogicalPlanConfigurationTest.java
Example 20: testAttributesCodec
import com.datatorrent.api.Context.PortContext; // import the required package/class
@Test
public void testAttributesCodec()
{
  Assert.assertNotSame(null, new Long[] {com.datatorrent.api.Context.DAGContext.serialVersionUID, OperatorContext.serialVersionUID, PortContext.serialVersionUID});
  @SuppressWarnings("unchecked")
  Set<Class<? extends Context>> contextClasses = Sets.newHashSet(com.datatorrent.api.Context.DAGContext.class, OperatorContext.class, PortContext.class);
  for (Class<?> c : contextClasses) {
    for (Attribute<Object> attr : AttributeInitializer.getAttributes(c)) {
      Assert.assertNotNull(attr.name + " codec", attr.codec);
    }
  }
}
Author: apache | Project: apex-core | Lines: 13 | Source: LogicalPlanConfigurationTest.java
Note: The com.datatorrent.api.Context.PortContext examples in this article were collected from open-source projects hosted on platforms such as GitHub and MSDocs. Copyright of the source code remains with the original authors; consult each project's license before redistributing or reusing the code. Do not reproduce this article without permission.