本文整理汇总了Python中pylearn2.utils.iteration.resolve_iterator_class函数的典型用法代码示例。如果您正苦于以下问题:Python resolve_iterator_class函数的具体用法?Python resolve_iterator_class怎么用?Python resolve_iterator_class使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了resolve_iterator_class函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。
示例1: __init__
def __init__(self, which_set, path=None):
    """
    Load one split of the click-prediction dataset from ``click_data.h5``.

    Parameters
    ----------
    which_set : str
        One of ``'train'``, ``'valid'`` or ``'test'``.
    path : None
        Must be None; the data file is expected in the current directory.
    """
    self.mapper = {"train": 0, "valid": 1, "test": 2}
    assert which_set in self.mapper.keys()
    self.__dict__.update(locals())
    del self.self
    if path is not None:
        raise NotImplementedError("Data path is the current directory.")
    # load data
    file_n = "click_data.h5"
    self.h5file = tables.open_file(file_n, mode="r")
    if which_set == "test":
        # Test split has no labels.
        test_group = self.h5file.root.test.test_raw
        self.X = test_group.X_t
        self.y = None
    else:
        # Train and validation splits live under the same HDF5 group.
        train_group = self.h5file.root.train.train_raw
        if which_set == "train":
            self.X = train_group.X_train
            self.y = train_group.y_train
        else:
            self.X = train_group.X_valid
            self.y = train_group.y_valid
    # Bookkeeping for sequential access over the whole split.
    self.samples = slice(0, self.X.shape[0])
    self.sample_index = self.samples.start
    self.examples = self.X.shape[0]
    max_labels = 2
    X_source = "features"
    # NOTE(review): feature dimensionality is hard-coded to 23 — presumably
    # the number of columns in the HDF5 arrays; confirm against the data file.
    X_space = VectorSpace(dim=23)
    if self.y is None:
        space = X_space
        source = X_source
    else:
        # Binary click target stored as a single index column.
        y_space = IndexSpace(dim=1, max_labels=max_labels)
        y_source = "targets"
        space = CompositeSpace((X_space, y_space))
        source = (X_source, y_source)
    self.data_specs = (space, source)
    self.X_space = X_space
    # Defaults for the pylearn2 iterator protocol.
    self._iter_mode = resolve_iterator_class("sequential")
    self._iter_topo = False
    self._iter_targets = False
    self._iter_data_specs = (self.X_space, "features")
    self._iter_subset_class = resolve_iterator_class("even_sequential")
开发者ID:gau2112,项目名称:kaggle-click,代码行数:54,代码来源:dataclassraw.py
示例2: __init__
def __init__(self, min_x=-6.28, max_x=6.28, std=.05, rng=_default_seed):
    """
    Constructor.

    Parameters
    ----------
    min_x : float
        Lower limit for x as in cos(x).
    max_x : float
        Higher limit for x as in cos(x).
    std : float
        Standard deviation of the Gaussian noise added to generated values.
    rng : object
        Seed or random number generator for sampling.
    """
    super(CosDataset, self).__init__()
    #: lower limit for x as in cos(x)
    self.min_x = min_x
    #: higher limit for x as in cos(x)
    self.max_x = max_x
    #: standard deviation for the noise added to the values we generate
    self.std = std
    # argument to resolve_iterator_class() can be either
    # a string from [sequential, shuffled_sequential, random_slice,
    # random_uniform, batchwise_shuffled_sequential, even_sequential,
    # even_shuffled_sequential, even_batchwise_shuffled_sequential,
    # even_sequences] or a SubsetIterator subclass.
    #: default iterator implementation (a class to be instantiated)
    self._iter_subset_class = resolve_iterator_class('sequential')
    #: default data specifications for iterator
    self._iter_data_specs = (VectorSpace(2), 'features')
    #: default batch size for the iterator
    self._iter_batch_size = 100
    #: default number of batches for the iterator
    self._iter_num_batches = 10
    #: random number generator
    self.rng = make_np_rng(rng, which_method=['uniform', 'randn'])
开发者ID:TNick,项目名称:pyl2extra,代码行数:35,代码来源:cos_dataset.py
示例3: iterator
def iterator(self, mode=None, batch_size=None, num_batches=None,
             rng=None, data_specs=None, return_tuple=False):
    """
    Return a FiniteDatasetIterator over this dataset.

    Parameters
    ----------
    mode : str or SubsetIterator subclass, optional
        Iteration scheme; falls back to ``self._iter_subset_class``.
    batch_size, num_batches : int, optional
        Batch geometry; fall back to the ``_iter_*`` defaults.
    rng : object, optional
        Random generator, required (and defaulted from ``self.rng``)
        only for stochastic modes.
    data_specs : (space, source) pair, optional
    return_tuple : bool

    Raises
    ------
    ValueError
        If no mode is given and no default mode is set on the dataset.
    """
    if mode is None:
        if hasattr(self, '_iter_subset_class'):
            mode = self._iter_subset_class
        else:
            # Bug fix: in the transcribed original this raise was
            # unconditional, making the method unusable; it must only
            # fire when no default mode exists.
            raise ValueError('iteration mode not provided and no default '
                             'mode set for %s' % str(self))
    else:
        mode = resolve_iterator_class(mode)
    if batch_size is None:
        batch_size = getattr(self, '_iter_batch_size', None)
    if num_batches is None:
        num_batches = getattr(self, '_iter_num_batches', None)
    # Only stochastic modes need a generator.
    if rng is None and mode.stochastic:
        rng = self.rng
    if data_specs is None:
        data_specs = getattr(self, '_iter_data_specs', None)
    return FiniteDatasetIterator(
        self,
        mode(self.n_samples,
             batch_size,
             num_batches,
             rng),
        data_specs=data_specs,
        return_tuple=return_tuple)
开发者ID:LeonBai,项目名称:lisa_emotiw,代码行数:27,代码来源:facetubes.py
示例4: iterator
def iterator(self, mode=None, batch_size=None, num_batches=None,
             topo=None, targets=None, rng=None):
    """
    Return a FiniteDatasetIterator using the legacy ``topo``/``targets``
    interface; every argument falls back to the dataset's ``_iter_*``
    defaults when not given.
    """
    # TODO: Refactor, deduplicate with set_iteration_scheme
    if mode is None:
        if hasattr(self, '_iter_subset_class'):
            mode = self._iter_subset_class
        else:
            raise ValueError('iteration mode not provided and no default '
                             'mode set for %s' % str(self))
    else:
        mode = resolve_iterator_class(mode)
    if batch_size is None:
        batch_size = getattr(self, '_iter_batch_size', None)
    if num_batches is None:
        num_batches = getattr(self, '_iter_num_batches', None)
    if topo is None:
        topo = getattr(self, '_iter_topo', False)
    if targets is None:
        targets = getattr(self, '_iter_targets', False)
    # Only stochastic modes consume a generator.
    if rng is None and mode.stochastic:
        rng = self.rng
    return FiniteDatasetIterator(self,
                                 mode(self.X.shape[0], batch_size,
                                      num_batches, rng),
                                 topo, targets)
开发者ID:doorjuice,项目名称:pylearn,代码行数:25,代码来源:dense_design_matrix.py
示例5: iterator
def iterator(self, mode=None, batch_size=None, num_batches=None,
             rng=None, data_specs=None, return_tuple=False):
    """
    Method inherited from `pylearn2.datasets.dataset.Dataset`.

    Builds a FiniteDatasetIterator over sparse data, densifying each
    batch on the fly; ``mode`` is mandatory here (no dataset default).
    """
    self.mode = mode
    self.batch_size = batch_size
    self._return_tuple = return_tuple
    # TODO: If there is a view_converter, we have to use it to convert
    # the stored data for "features" into one that the iterator can return.
    space, source = data_specs or (self.X_space, 'features')
    assert isinstance(space, CompositeSpace),\
        "Unexpected input space for the data."
    sub_spaces = space.components
    sub_sources = source
    # Batches are stored sparse; densify (and cast to the theano float
    # type) for both features and targets.
    conv_fn = lambda x: x.todense().astype(theano.config.floatX)
    convert = []
    for sp, src in safe_zip(sub_spaces, sub_sources):
        convert.append(conv_fn if src in ('features', 'targets') else None)
    assert mode is not None,\
        "Iteration mode not provided for %s" % str(self)
    mode = resolve_iterator_class(mode)
    subset_iterator = mode(self.X.shape[0], batch_size, num_batches, rng)
    return FiniteDatasetIterator(self,
                                 subset_iterator,
                                 data_specs=data_specs,
                                 return_tuple=return_tuple,
                                 convert=convert)
开发者ID:BrianMiner,项目名称:scikit-neuralnetwork,代码行数:32,代码来源:dataset.py
示例6: __init__
def __init__(self, data=None, data_specs=None, rng=_default_seed,
             preprocessor=None, fit_preprocessor=False):
    """
    Store ``data`` formatted as described by ``data_specs`` and set up
    iterator defaults; optionally apply (and fit) a preprocessor.
    """
    # data_specs should be flat, and there should be no
    # duplicates in source, as we keep only one version
    assert is_flat_specs(data_specs)
    if isinstance(data_specs[1], tuple):
        assert sorted(set(data_specs[1])) == sorted(data_specs[1])
    space, source = data_specs
    # Validate that the numpy data actually fits the declared space.
    space.np_validate(data)
    # TODO: assume that data[0] is num example => error if channel in c01b
    # assert len(set(elem.shape[0] for elem in list(data))) <= 1
    self.data = data
    self.data_specs = data_specs
    # TODO: assume that data[0] is num example => error if channel in c01b
    self.num_examples = list(data)[-1].shape[0]  # TODO: list(data)[0].shape[0]
    self.compress = False
    self.design_loc = None
    self.rng = make_np_rng(rng, which_method='random_integers')
    # Defaults for iterators
    self._iter_mode = resolve_iterator_class('sequential')
    if preprocessor:
        preprocessor.apply(self, can_fit=fit_preprocessor)
    self.preprocessor = preprocessor
开发者ID:Dining-Engineers,项目名称:Multi-Column-Deep-Neural-Network,代码行数:25,代码来源:vector_spaces_dataset_c01b.py
示例7: iterator
def iterator(self, mode=None, batch_size=None, num_batches=None,
             rng=None, data_specs=None, return_tuple=False):
    """
    Return a VariableImageDatasetIterator; only contiguous-slice modes
    are accepted because HDF5 does not support the advanced (fancy)
    indexing that fully random modes would require.
    """
    allowed_modes = ('sequential', 'random_slice', 'even_sequential',
                     'batchwise_shuffled_sequential',
                     'even_batchwise_shuffled_sequential')
    if mode is not None and mode not in allowed_modes:
        raise ValueError("Due to HDF5 limitations on advanced indexing, " +
                         "the '" + mode + "' iteration mode is not " +
                         "supported")
    if data_specs is None:
        data_specs = self._iter_data_specs
    space, source = data_specs
    # Normalize to parallel tuples of (space, source) components.
    sub_spaces, sub_sources = (
        (space.components, source) if isinstance(space, CompositeSpace)
        else ((space,), (source,)))
    # No per-source conversion needed; safe_izip also asserts equal lengths.
    convert = [None for sp, src in safe_izip(sub_spaces, sub_sources)]
    mode = (self._iter_subset_class if mode is None
            else resolve_iterator_class(mode))
    if batch_size is None:
        batch_size = getattr(self, '_iter_batch_size', None)
    if num_batches is None:
        num_batches = getattr(self, '_iter_num_batches', None)
    if rng is None and mode.stochastic:
        rng = self.rng
    return VariableImageDatasetIterator(
        dataset=self,
        subset_iterator=mode(
            self.num_examples, batch_size, num_batches, rng),
        data_specs=data_specs,
        return_tuple=return_tuple,
        convert=convert)
开发者ID:amiltonwong,项目名称:ift6266h15,代码行数:35,代码来源:variable_image_dataset.py
示例8: _create_subset_iterator
def _create_subset_iterator(self, mode, batch_size=None, num_batches=None,
                            rng=None):
    """
    Instantiate a subset iterator of the requested ``mode`` over this
    dataset's examples, creating a fresh numpy RNG only when the mode is
    stochastic and none was supplied.
    """
    iterator_cls = resolve_iterator_class(mode)
    if iterator_cls.stochastic and rng is None:
        rng = make_np_rng()
    return iterator_cls(self.get_num_examples(), batch_size, num_batches,
                        rng)
开发者ID:123fengye741,项目名称:pylearn2,代码行数:7,代码来源:penntree.py
示例9: __init__
def __init__(self, which_set='debug', start=None, end=None, shuffle=True,
             lazy_load=False, rng=_default_seed):
    """
    Build the im2latex dataset.

    Parameters
    ----------
    which_set : str
        One of ``'debug'``, ``'train'``, ``'test'``. Only ``'debug'``
        (synthetic zero data) is implemented; the other branches are
        still stubs and will fail with a NameError on ``X``.
    rng : object
        Seed or generator for ``make_np_rng``.
    """
    assert which_set in ['debug', 'train', 'test']
    if which_set == 'debug':
        maxlen, n_samples, n_annotations, n_features = 10, 12, 13, 14
        X = N.zeros(shape=(n_samples, maxlen))
        X_mask = X  # same with X
        Z = N.zeros(shape=(n_annotations, n_samples, n_features))
    elif which_set == 'train':
        pass
    else:
        pass
    self.X, self.X_mask, self.Z = (X, X_mask, Z)
    self.sources = ('features', 'target')
    self.spaces = CompositeSpace([
        SequenceSpace(space=VectorSpace(dim=self.X.shape[1])),
        SequenceDataSpace(space=VectorSpace(dim=self.Z.shape[-1]))
    ])
    # NOTE(review): attribute name 'data_spces' is a typo for 'data_specs',
    # kept as-is because external code may already reference it.
    self.data_spces = (self.spaces, self.sources)
    # self.X_space, self.X_mask_space, self.Z_space
    # Default iterator
    self._iter_mode = resolve_iterator_class('sequential')
    self._iter_topo = False
    self._iter_target = False
    self._iter_data_specs = self.data_spces
    # Bug fix: was which_method='random_intergers' (typo), which makes
    # make_np_rng fail since RandomState has no such method.
    self.rng = make_np_rng(rng, which_method='random_integers')
开发者ID:EugenePY,项目名称:tensor-work,代码行数:29,代码来源:im2latex.py
示例10: iterator
def iterator(self, mode=None, batch_size=None, num_batches=None,
             rng=None, data_specs=None,
             return_tuple=False):
    """
    Copied from dense_design_matrix, in order to fix uneven problem.
    """
    if data_specs is None:
        data_specs = self._iter_data_specs
    # If there is a view_converter, we have to use it to convert
    # the stored data for "features" into one that the iterator
    # can return.
    space, source = data_specs
    if isinstance(space, CompositeSpace):
        sub_spaces = space.components
        sub_sources = source
    else:
        sub_spaces = (space,)
        sub_sources = (source,)
    convert = []
    for sp, src in safe_zip(sub_spaces, sub_sources):
        if src == 'features' and \
                getattr(self, 'view_converter', None) is not None:
            # Bind self/space as defaults so each lambda captures the
            # current values rather than the loop's last iteration.
            conv_fn = (lambda batch, self=self, space=sp:
                       self.view_converter.get_formatted_batch(batch,
                                                               space))
        else:
            conv_fn = None
        convert.append(conv_fn)
    # TODO: Refactor
    if mode is None:
        if hasattr(self, '_iter_subset_class'):
            mode = self._iter_subset_class
        else:
            raise ValueError('iteration mode not provided and no default '
                             'mode set for %s' % str(self))
    else:
        mode = resolve_iterator_class(mode)
    if batch_size is None:
        batch_size = getattr(self, '_iter_batch_size', None)
    if num_batches is None:
        num_batches = getattr(self, '_iter_num_batches', None)
    if rng is None and mode.stochastic:
        rng = self.rng
    # hack to make the online augmentations run
    # NOTE(review): this mutates a *class* attribute of
    # FiniteDatasetIterator, affecting every dataset's iterators globally.
    FiniteDatasetIterator.uneven = False
    iterator = FiniteDatasetIterator(self,
                                     mode(self.X.shape[0],
                                          batch_size,
                                          num_batches,
                                          rng),
                                     data_specs=data_specs,
                                     return_tuple=return_tuple,
                                     convert=convert)
    return iterator
开发者ID:Neuroglycerin,项目名称:neukrill-net-tools,代码行数:60,代码来源:dense_dataset.py
示例11: __init__
def __init__(self, which_set, context_len, data_mode, shuffle=True):
    """
    Load a PennTreebank n-gram split and initialize the parent dataset
    with context columns as X and the final column as y.

    Parameters
    ----------
    which_set : str
        Split name forwarded to ``_load_data``.
    context_len : int
        Number of context tokens per example.
    data_mode : str
        Forwarded to ``_load_data``.
    shuffle : bool
        If True, use a shuffled-sequential default iterator.
    """
    self.__dict__.update(locals())
    del self.self
    # Load data into self._data (defined in PennTreebank)
    self._load_data(which_set, context_len, data_mode)
    # Fix: use print() calls (Python-3 compatible; identical output on
    # Python 2 for single arguments) instead of print statements.
    print(self._raw_data[0:30])
    print(self._data[:, :-1][:10])
    print("_____________")
    print(self._data[:, -1:][:10])
    super(PennTreebank_NGrams, self).__init__(
        X=self._data[:, :-1],
        y=self._data[:, -1:],
        X_labels=10000, y_labels=10000
    )
    if shuffle:
        warnings.warn("Note that the PennTreebank samples are only "
                      "shuffled when the iterator method is used to "
                      "retrieve them.")
        self._iter_subset_class = resolve_iterator_class(
            'shuffled_sequential'
        )
开发者ID:ktho22,项目名称:pylearn2,代码行数:25,代码来源:penntree.py
示例12: iterator
def iterator(self, mode=None, batch_size=None, num_batches=None,
             topo=None, targets=None, rng=None, data_specs=None,
             return_tuple=False):
    """
    Return a FiniteDatasetIterator using the data_specs interface; the
    legacy ``topo``/``targets`` arguments are rejected.
    """
    if topo is not None or targets is not None:
        raise ValueError("You should use the new interface iterator")
    if mode is None:
        if hasattr(self, '_iter_subset_class'):
            mode = self._iter_subset_class
        else:
            raise ValueError('iteration mode not provided and no default '
                             'mode set for %s' % str(self))
    else:
        mode = resolve_iterator_class(mode)
    if batch_size is None:
        batch_size = getattr(self, '_iter_batch_size', None)
    if num_batches is None:
        num_batches = getattr(self, '_iter_num_batches', None)
    # Only stochastic modes consume a generator.
    if rng is None and mode.stochastic:
        rng = self.rng
    if data_specs is None:
        data_specs = self.data_specs
    return FiniteDatasetIterator(
        self,
        mode(self.get_num_examples(),
             batch_size, num_batches, rng),
        data_specs=data_specs, return_tuple=return_tuple
    )
开发者ID:AlexArgus,项目名称:pylearn2,代码行数:30,代码来源:vector_spaces_dataset.py
示例13: __init__
def __init__(self, which_set, context_len, data_mode, shuffle=True):
    """
    Load a PennTreebank split and expose overlapping n-gram windows:
    each row is (context_len context tokens, 1 target token).
    """
    self.__dict__.update(locals())
    del self.self
    # Load data into self._data (defined in PennTreebank)
    self._load_data(which_set, context_len, data_mode)
    # Zero-copy sliding windows over the raw token stream: row i is
    # tokens [i, i + context_len]; both strides equal one item so rows
    # overlap. Safe here because the view is only read from.
    self._data = as_strided(self._raw_data,
                            shape=(len(self._raw_data) - context_len,
                                   context_len + 1),
                            strides=(self._raw_data.itemsize,
                                     self._raw_data.itemsize))
    super(PennTreebankNGrams, self).__init__(
        X=self._data[:, :-1],
        y=self._data[:, -1:],
        X_labels=self._max_labels, y_labels=self._max_labels
    )
    if shuffle:
        warnings.warn("Note that the PennTreebank samples are only "
                      "shuffled when the iterator method is used to "
                      "retrieve them.")
        self._iter_subset_class = resolve_iterator_class(
            'shuffled_sequential'
        )
开发者ID:123fengye741,项目名称:pylearn2,代码行数:26,代码来源:penntree.py
示例14: iterator
def iterator(self, mode=None, batch_size=None, num_batches=None,
             rng=None, data_specs=None,
             return_tuple=False):
    """
    Return a FiniteDatasetIterator; feature batches are formatted via the
    view_converter and, when a distorter is configured, distorted on the
    fly after formatting.
    """
    if data_specs is None:
        data_specs = self._iter_data_specs
    # If there is a view_converter, we have to use it to convert
    # the stored data for "features" into one that the iterator
    # can return.
    space, source = data_specs
    if isinstance(space, CompositeSpace):
        sub_spaces = space.components
        sub_sources = source
    else:
        sub_spaces = (space,)
        sub_sources = (source,)
    convert = []
    for sp, src in safe_zip(sub_spaces, sub_sources):
        if (src == "features"
                and getattr(self, "view_converter", None) is not None):
            if self.distorter is None:
                # Bind self/space as defaults so each lambda captures the
                # current loop values, not the final iteration's.
                conv_fn = (lambda batch, self=self, space=sp:
                           self.view_converter.get_formatted_batch(batch, space))
            else:
                # Format first, then apply the online distortion.
                conv_fn = (lambda batch, self=self, space=sp:
                           self.distorter._distort(
                               self.view_converter.get_formatted_batch(batch,
                                                                       space)))
        else:
            conv_fn = None
        convert.append(conv_fn)
    # TODO: Refactor
    if mode is None:
        if hasattr(self, "_iter_subset_class"):
            mode = self._iter_subset_class
        else:
            raise ValueError("iteration mode not provided and no default "
                             "mode set for %s" % str(self))
    else:
        mode = resolve_iterator_class(mode)
    if batch_size is None:
        batch_size = getattr(self, "_iter_batch_size", None)
    if num_batches is None:
        num_batches = getattr(self, "_iter_num_batches", None)
    if rng is None and mode.stochastic:
        rng = self.rng
    return FiniteDatasetIterator(self,
                                 mode(self.X.shape[0],
                                      batch_size,
                                      num_batches,
                                      rng),
                                 data_specs=data_specs,
                                 return_tuple=return_tuple,
                                 convert=convert)
开发者ID:ecastrow,项目名称:pl2mind,代码行数:59,代码来源:MRI.py
示例15: __init__
def __init__(self, X=None, topo_view=None, y=None, tags=None,
             view_converter=None, axes=('b', 0, 1, 'c'),
             rng=_default_seed, preprocessor=None, fit_preprocessor=False):
    """
    Parameters
    ----------
    X : ndarray, 2-dimensional, optional
        Should be supplied if `topo_view` is not. A design
        matrix of shape (number examples, number features)
        that defines the dataset.
    topo_view : ndarray, optional
        Should be supplied if X is not. An array whose first
        dimension is of length number examples. The remaining
        dimensions are examples with topological significance,
        e.g. for images the remaining axes are rows, columns,
        and channels.
    y : ndarray, 1-dimensional(?), optional
        Labels or targets for each example. The semantics here
        are not quite nailed down for this yet.
    tags : ndarray, optional
        First dimension is the number of examples, other dimensions
        contain extra information about the examples. Used to keep
        track of position information for randomly cropped patches.
    view_converter : object, optional
        An object for converting between design matrices and
        topological views. Currently DefaultViewConverter is
        the only type available but later we may want to add
        one that uses the retina encoding that the U of T group
        uses.
    rng : object, optional
        A random number generator used for picking random
        indices into the design matrix when choosing minibatches.
    preprocessor : object, optional
        Applied to the dataset after construction; fitted first when
        `fit_preprocessor` is True.
    fit_preprocessor : bool, optional
        Whether the preprocessor may fit itself to this data.
    """
    self.X = X
    if view_converter is not None:
        # An explicit converter and a topo_view are mutually exclusive.
        assert topo_view is None
        self.view_converter = view_converter
    else:
        if topo_view is not None:
            # Derives X and a DefaultViewConverter from the topological view.
            self.set_topological_view(topo_view, axes)
    self.y = y
    self.tags = tags
    self.compress = False
    self.design_loc = None
    # Accept either a ready-made RandomState or a seed.
    if hasattr(rng, 'random_integers'):
        self.rng = rng
    else:
        self.rng = np.random.RandomState(rng)
    # Defaults for iterators
    self._iter_mode = resolve_iterator_class('sequential')
    self._iter_topo = False
    self._iter_targets = False
    if preprocessor:
        preprocessor.apply(self, can_fit=fit_preprocessor)
    self.preprocessor = preprocessor
开发者ID:capybaralet,项目名称:current,代码行数:57,代码来源:dense_design_matrix.py
示例16: iterator
def iterator(self, mode=None, batch_size=None, num_batches=None,
             topo=None, targets=None, rng=None, data_specs=None,
             return_tuple=False):
    """
    method inherited from Dataset

    Features are stored sparse and densified per batch; targets pass
    through unchanged.
    """
    self.mode = mode
    self.batch_size = batch_size
    self._targets = targets
    self._return_tuple = return_tuple
    if data_specs is None:
        data_specs = self._iter_data_specs
    # If there is a view_converter, we have to use it to convert
    # the stored data for "features" into one that the iterator
    # can return.
    # if
    space, source = data_specs
    if isinstance(space, CompositeSpace):
        sub_spaces = space.components
        sub_sources = source
    else:
        sub_spaces = (space,)
        sub_sources = (source,)
    convert = []
    for sp, src in safe_zip(sub_spaces, sub_sources):
        if src == 'features':
            # Densify sparse feature batches.
            conv_fn = lambda x: x.todense()
        elif src == 'targets':
            conv_fn = lambda x: x
        else:
            conv_fn = None
        convert.append(conv_fn)
    if mode is None:
        if hasattr(self, '_iter_subset_class'):
            mode = self._iter_subset_class
        else:
            raise ValueError('iteration mode not provided and no default '
                             'mode set for %s' % str(self))
    else:
        mode = resolve_iterator_class(mode)
    # NOTE(review): unlike sibling implementations, batch_size/num_batches
    # are not defaulted from _iter_* attributes here, and rng is passed
    # through as given — confirm callers always supply what the mode needs.
    return FiniteDatasetIterator(self,
                                 mode(self.X.shape[0],
                                      batch_size,
                                      num_batches,
                                      rng),
                                 data_specs=data_specs,
                                 return_tuple=return_tuple,
                                 convert=convert)
开发者ID:ndronen,项目名称:pylearnutils,代码行数:54,代码来源:sparse_expander.py
示例17: iterator
def iterator(self, mode=None, batch_size=None, num_batches=None,
             rng=None, data_specs=None, return_tuple=False):
    """
    Return a FiniteDatasetIterator over the TIMIT sequences.

    When ``self.noise`` is enabled, a fresh per-sequence Gaussian noise
    array (one column per sample of the sequence) is drawn for the
    coming epoch and stored on ``self.noise_this_epoch``.
    """
    if data_specs is None:
        data_specs = self._iter_data_specs
    # If there is a view_converter, we have to use it to convert
    # the stored data for "features" into one that the iterator
    # can return.
    space, source = data_specs
    if isinstance(space, CompositeSpace):
        sub_spaces = space.components
        sub_sources = source
    else:
        sub_spaces = (space,)
        sub_sources = (source,)
    # No conversion needed; safe_zip still asserts the lengths match.
    convert = [None for sp, src in safe_zip(sub_spaces, sub_sources)]
    # TODO: Refactor
    if mode is None:
        if hasattr(self, '_iter_subset_class'):
            mode = self._iter_subset_class
        else:
            raise ValueError('iteration mode not provided and no default '
                             'mode set for %s' % str(self))
    else:
        mode = resolve_iterator_class(mode)
    if batch_size is None:
        batch_size = getattr(self, '_iter_batch_size', None)
    if num_batches is None:
        num_batches = getattr(self, '_iter_num_batches', None)
    if rng is None and mode.stochastic:
        rng = self.rng
    if self.noise != False:
        # Fix: use list comprehensions instead of map(lambda ...).
        # Under Python 3, map returns a one-shot iterator; storing it on
        # self would break any consumer that indexes or re-iterates the
        # per-epoch noise.
        lengths = [len(seq) for seq in self.samples_sequences]
        self.noise_this_epoch = [
            numpy.random.normal(0, self.noise, (length, 1))
            for length in lengths]
    return FiniteDatasetIterator(self,
                                 mode(self.num_examples, batch_size,
                                      num_batches, rng),
                                 data_specs=data_specs,
                                 return_tuple=return_tuple,
                                 convert=convert)
开发者ID:davidtob,项目名称:research,代码行数:52,代码来源:timit.py
示例18: iterator
def iterator(self, mode=None, batch_size=1, num_batches=None,
             rng=None, data_specs=None, return_tuple=False):
    """
    Return a FiniteDatasetIterator over the tagger dataset.

    When ``num_batches`` is not given it is derived from the dataset
    size; ``mode`` is resolved unconditionally, so callers must pass a
    valid mode string or SubsetIterator class.
    """
    if num_batches is None:
        # Fix: floor division. Under Python 3, "/" yields a float,
        # which is not a valid batch count for the subset iterator.
        num_batches = len(self.X1) // batch_size
    mode = resolve_iterator_class(mode)
    return FiniteDatasetIterator(
        self,
        mode(len(self.X1), batch_size, num_batches, rng),
        data_specs=data_specs,
    )
开发者ID:zseder,项目名称:hunvec,代码行数:13,代码来源:word_tagger_dataset.py
示例19: iterator
def iterator(self, mode="sequential", batch_size=None, num_batches=None, rng=None):
    """
    Method inherited from the Dataset.

    Resolves the iteration mode, builds the subset iterator over all
    examples, and wraps it in an EmotiwArrangerIter.
    """
    if batch_size is None and mode == "sequential":
        batch_size = 100  # Has to be big enough or we'll never pick anything.
    self.batch_size = batch_size
    self.mode = resolve_iterator_class(mode)
    # Fix: forward the caller's rng instead of hard-coding rng=None,
    # which silently discarded the provided generator for stochastic modes.
    self.subset_iterator = self.mode(self.total_n_exs, batch_size,
                                     num_batches, rng=rng)
    return EmotiwArrangerIter(self, self.subset_iterator,
                              batch_size=batch_size)
开发者ID:YangXS,项目名称:lisa_emotiw,代码行数:13,代码来源:arrangement_generator.py
示例20: __init__
def __init__(self, data=None, data_specs=None, rng=_default_seed,
             preprocessor=None, fit_preprocessor=False):
    """
    Parameters
    ----------
    data : ndarray, or tuple of ndarrays, containing the data.
        It is formatted as specified in `data_specs`.
        For instance, if `data_specs` is (VectorSpace(nfeat), 'features'),
        then `data` has to be a 2-d ndarray, of shape (nb examples,
        nfeat), that defines an unlabeled dataset. If `data_specs`
        is (CompositeSpace(Conv2DSpace(...), VectorSpace(1)),
        ('features', 'target')), then `data` has to be an (X, y) pair,
        with X being an ndarray containing images stored in the topological
        view specified by the `Conv2DSpace`, and y being a 2-D ndarray
        of width 1, containing the labels or targets for each example.
    data_specs : (space, source) pair, where space is an instance of
        `Space` (possibly a `CompositeSpace`), and `source` is a
        string (or tuple of strings, if `space` is a `CompositeSpace`),
        defining the format and labels associated to `data`.
    rng : object, optional
        A random number generator used for picking random
        indices into the design matrix when choosing minibatches.
    preprocessor : object, optional
        Applied to the dataset after construction; fitted first when
        `fit_preprocessor` is True.
    fit_preprocessor : bool, optional
        Whether the preprocessor may fit itself to this data.
    """
    # data_specs should be flat, and there should be no
    # duplicates in source, as we keep only one version
    assert is_flat_specs(data_specs)
    if isinstance(data_specs[1], tuple):
        assert sorted(set(data_specs[1])) == sorted(data_specs[1])
    self.data = data
    self.data_specs = data_specs
    self.compress = False
    self.design_loc = None
    # Accept either a ready-made RandomState or a seed.
    if hasattr(rng, 'random_integers'):
        self.rng = rng
    else:
        self.rng = np.random.RandomState(rng)
    # Defaults for iterators
    self._iter_mode = resolve_iterator_class('sequential')
    if preprocessor:
        preprocessor.apply(self, can_fit=fit_preprocessor)
    self.preprocessor = preprocessor
开发者ID:Alienfeel,项目名称:pylearn2,代码行数:49,代码来源:vector_spaces_dataset.py
注:本文中的pylearn2.utils.iteration.resolve_iterator_class函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。
请发表评论