本文整理汇总了Python中pylearn2.utils.sharedX函数的典型用法代码示例。如果您正苦于以下问题:Python sharedX函数的具体用法?Python sharedX怎么用?Python sharedX使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了sharedX函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。
示例1: bench
def bench(f, m, n):
    """Benchmark a softmax implementation.

    Compiles a Theano function computing ``Y <- f(X)`` on an (m, n)
    shared matrix, warms it up with 5 calls, then returns the mean
    wall-clock time (seconds) over 5 timed calls.
    """
    #print f
    rng = np.random.RandomState([2012,9,11])
    X = sharedX(rng.randn(m,n))
    Y = sharedX(X.get_value())
    func = theano.function([], updates = { Y : f(X) })
    nodes = func.maker.fgraph.toposort()
    # Make sure the optimizations haven't made us benchmark something
    # different from what we intend: the hand-written softmax must not have
    # been fused into the built-in op, and the op variants must survive.
    if f is my_softmax:
        assert True not in [ isinstance(node.op, theano.tensor.nnet.Softmax) for node in nodes ]
    if f is softmax_op:
        assert True in [ isinstance(node.op, theano.tensor.nnet.Softmax) for node in nodes ]
    if f is softmax_with_bias:
        assert True in [ isinstance(node.op, theano.tensor.nnet.SoftmaxWithBias) for node in nodes ]
    # warm up (first calls may pay compilation / transfer overhead)
    for i in xrange(5):
        func()
    # actual time
    times = []
    for i in xrange(5):
        t1 = time.time()
        func()
        t2 = time.time()
        times.append(t2-t1)
    rval = np.asarray(times).mean()
    #print rval
    return rval
开发者ID:cc13ny,项目名称:galatea,代码行数:35,代码来源:bench_softmax.py
示例2: set_input_space
def set_input_space(self, space):
    """Record the input space, set up the output space, and allocate
    the three inner layers' parameters plus gradient-monitoring shareds."""
    self.input_space = space
    if isinstance(space, VectorSpace):
        self.requires_reformat = False
        self.input_dim = space.dim
    else:
        # Non-vector inputs are flattened to a VectorSpace before use
        self.requires_reformat = True
        self.input_dim = space.get_total_dimension()
        self.desired_space = VectorSpace(self.input_dim)
    self.output_space = VectorSpace(self.dim)
    # Dimensions for the three inner layers created below.
    # NOTE(review): exact roles of the three layers inferred from the
    # dims lists (expert / gater hidden / gater output) -- confirm.
    self.input_dims = [self.input_dim, self.input_dim, self.hidden_dim]
    self.output_dims = [self.dim, self.hidden_dim, self.gater_dim]
    self.W = [None,None,None]
    self.b = [None,None,None]
    for i in range(3):
        self._init_inner_layer(i)
    # Scalar shared variables; presumably used to monitor gradient
    # components elsewhere -- TODO confirm against callers.
    self.stoch_grad = sharedX(0)
    self.kl_grad = sharedX(0)
    self.linear_grad = sharedX(0)
开发者ID:nicholas-leonard,项目名称:delicious,代码行数:25,代码来源:group_gater.py
示例3: _init_inner_layer
def _init_inner_layer(self, idx):
    """Initialize weight matrix W[idx] and bias b[idx].

    Exactly one of ``irange``/``istdev``/``sparse_init`` must be set for
    this index, selecting uniform init, Gaussian init, or sparse
    Gaussian init (a fixed number of nonzero inputs per output unit).
    """
    rng = self.mlp.rng
    if self.irange[idx] is not None:
        assert self.istdev[idx] is None
        assert self.sparse_init[idx] is None
        W = rng.uniform(-self.irange[idx], self.irange[idx],
                        (self.input_dims[idx], self.output_dims[idx]))
    elif self.istdev[idx] is not None:
        assert self.sparse_init[idx] is None
        W = rng.randn(self.input_dims[idx], self.output_dims[idx]) \
            * self.istdev[idx]
    else:
        assert self.sparse_init[idx] is not None
        W = np.zeros((self.input_dims[idx], self.output_dims[idx]))
        for i in xrange(self.output_dims[idx]):
            assert self.sparse_init[idx] <= self.input_dims[idx]
            for j in xrange(self.sparse_init[idx]):
                idx2 = rng.randint(0, self.input_dims[idx])
                # resample until an unused input is found, so each output
                # unit receives exactly sparse_init distinct connections
                while W[idx2, i] != 0:
                    idx2 = rng.randint(0, self.input_dims[idx])
                W[idx2, i] = rng.randn()
        W *= self.sparse_stdev[idx]
    W = sharedX(W)
    W.name = self.layer_name + '_W' + str(idx)
    b = sharedX( np.zeros((self.output_dims[idx],)) \
        + self.init_bias[idx], \
        name = self.layer_name + '_b' + str(idx))
    self.W[idx] = W
    self.b[idx] = b
开发者ID:nicholas-leonard,项目名称:delicious,代码行数:32,代码来源:group_gater.py
示例4: run
def run():
    """Reproduction script for a nondeterminism bug.

    Builds a small update graph, runs it under RecordMode to record an
    execution trace, then replays the trace; RecordMode raises if the
    two runs diverge. ``disturb_mem`` calls perturb the heap layout to
    expose address-dependent nondeterminism.
    """
    disturb_mem.disturb_mem()
    b = sharedX(np.zeros((2,)))
    channels = OrderedDict()
    disturb_mem.disturb_mem()
    v_max = b.max(axis=0)
    v_min = b.min(axis=0)
    v_range = v_max - v_min
    updates = []
    for i, val in enumerate([
            v_max.max(),
            v_max.min(),
            v_range.max(),
            ]):
        disturb_mem.disturb_mem()
        s = sharedX(0., name='s_'+str(i))
        updates.append((s, val))
    # Strip names from all ancestor variables except the s_<i> shareds,
    # so recorded debug output does not depend on incidental names.
    for var in theano.gof.graph.ancestors(update for var, update in updates):
        if var.name is not None:
            if var.name[0] != 's' or len(var.name) != 2:
                var.name = None
    # channels is empty, so this loop is a no-op; note it would reuse the
    # last `s` bound by the loop above if it ever ran.
    for key in channels:
        updates.append((s, channels[key]))
    file_path='nondeterminism_6.txt'
    mode = RecordMode(file_path=file_path,
                      replay=0)
    f = theano.function([], mode=mode, updates=updates, on_unused_input='ignore', name='f')
    """
    print 'type(f): ',type(f)
    print 'elements of f:'
    for elem in dir(f):
    print '\t',elem
    print 'type(f.fn): ',type(f.fn)
    print 'elements of f.fn:'
    for elem in dir(f.fn):
    print '\t',elem
    """
    trials = 1
    # record pass
    for i in xrange(trials):
        disturb_mem.disturb_mem()
        f()
    mode.record.f.flush()
    mode.record.f.close()
    # replay pass: re-run against the recorded trace
    mode.set_record(Record(file_path=file_path, replay=1))
    for i in xrange(trials):
        disturb_mem.disturb_mem()
        f()
开发者ID:cc13ny,项目名称:galatea,代码行数:60,代码来源:nondeterminism_6.py
示例5: __init__
def __init__(self, nvis, nhid, hidden_transition_model, irange=0.05,
             non_linearity='sigmoid', use_ground_truth=True):
    """Simple recurrent model: input-to-hidden weights W/b, a pluggable
    hidden-transition model, and hidden-to-output weights U/c producing
    a single output per step.

    Parameters
    ----------
    nvis : int
        Input feature dimension.
    nhid : int
        Hidden state dimension.
    hidden_transition_model : object
        Model applied to the hidden state between steps.
        NOTE(review): exact interface not visible here -- confirm.
    irange : float, optional
        Weights drawn uniformly from [-irange, irange].
    non_linearity : str, optional
        One of 'sigmoid' or 'tanh'.
    use_ground_truth : bool, optional
        Stored flag; semantics defined by the training code.
    """
    allowed_non_linearities = {'sigmoid': T.nnet.sigmoid,
                               'tanh': T.tanh}
    self.nvis = nvis
    self.nhid = nhid
    self.hidden_transition_model = hidden_transition_model
    self.use_ground_truth = use_ground_truth
    # alpha starts at 1; presumably decayed by alpha_decrease_rate during
    # training -- TODO confirm against the training loop.
    self.alpha = sharedX(1)
    self.alpha_decrease_rate = 0.999
    assert non_linearity in allowed_non_linearities
    self.non_linearity = allowed_non_linearities[non_linearity]
    # Space initialization
    self.input_space = VectorSpace(dim=self.nvis)
    self.hidden_space = VectorSpace(dim=self.nhid)
    self.output_space = VectorSpace(dim=1)
    self.input_source = 'features'
    self.target_source = 'targets'
    # Features-to-hidden matrix
    W_value = numpy.random.uniform(low=-irange, high=irange,
                                   size=(self.nvis, self.nhid))
    self.W = sharedX(W_value, name='W')
    # Hidden biases
    b_value = numpy.zeros(self.nhid)
    self.b = sharedX(b_value, name='b')
    # Hidden-to-out matrix
    U_value = numpy.random.uniform(low=-irange, high=irange,
                                   size=(self.nhid, 1))
    self.U = sharedX(U_value, name='U')
    # Output bias
    c_value = numpy.zeros(1)
    self.c = sharedX(c_value, name='c')
开发者ID:amoliu,项目名称:research,代码行数:35,代码来源:rnn.py
示例6: redo_everything
def redo_everything(self):
    """ compiles learn_func if necessary
    makes new negative chains
    does not reset weights or biases
    TODO: figure out how to make the semantics of this cleaner / more in line with other models
    """
    # compile learn_func if necessary
    if self.autonomous:
        self.redo_theano()
    # make the negative chains (persistent chains are only needed when
    # not using plain CD)
    if not self.use_cd:
        self.V_chains = self.make_chains(self.bias_vis)
        self.V_chains.name = 'dbm_V_chains'
        self.H_chains = [ self.make_chains(bias_hid) for bias_hid in self.bias_hid ]
        for i, H_chain in enumerate(self.H_chains):
            H_chain.name = 'dbm_H[%d]_chain' % i
        # NOTE(review): indentation reconstructed from a flattened paste --
        # the class-chain setup below is assumed to belong to the
        # persistent-chain branch; confirm against the original file.
        if self.num_classes > 0:
            # broadcast the softmax of the class biases over all chains,
            # then sample one multinomial draw per chain
            P = np.zeros((self.negative_chains, self.num_classes)) \
                + T.nnet.softmax( self.bias_class )
            temp_theano_rng = RandomStreams(87)
            sample_from = Sampler(temp_theano_rng, 'multinomial')
            values = function([],sample_from(P))()
            self.Y_chains = sharedX(values, 'Y_chains')
        else:
            self.Y_chains = None
    if hasattr(self, 'init_beta') and self.init_beta is not None:
        self.beta = sharedX( np.zeros( self.bias_vis.get_value().shape) + self.init_beta, name = 'beta')
开发者ID:tomsbergmanis,项目名称:pylearn2speech,代码行数:32,代码来源:dense_binary_dbm.py
示例7: profile
def profile(f):
    """Time a probabilistic max-pooling implementation `f`.

    Builds a fixed random input, compiles a function that writes f's
    pooled/detector outputs into shared variables, then prints per-trial
    times (10 calls per trial) and the mean over 10 trials.
    """
    print 'profiling ',f
    rng = np.random.RandomState([2012,7,19])
    batch_size = 128
    rows = 30
    cols = 30
    channels = 16
    pool_rows = 3
    pool_cols = 3
    zv = rng.randn(channels, rows, cols, batch_size).astype(config.floatX)
    # put the inputs + outputs in shared variables so we don't pay GPU
    # transfer during test
    p_shared = sharedX(zv[:,0:rows:pool_rows,0:cols:pool_cols,:])
    h_shared = sharedX(zv)
    z_shared = sharedX(zv)
    p_th, h_th = f( z_shared, (pool_rows, pool_cols) )
    func = function([],updates = { p_shared : p_th, h_shared : h_th} )
    print 'warming up'
    for i in xrange(10):
        func()
    trials = 10
    results = []
    for i in xrange(trials):
        t1 = time.time()
        # 10 calls per trial to reduce timer-resolution noise
        for j in xrange(10):
            func()
        t2 = time.time()
        print t2 - t1
        results.append(t2-t1)
    print 'final: ',sum(results)/float(trials)
开发者ID:Alienfeel,项目名称:pylearn2,代码行数:35,代码来源:profile_probabilistic_max_pooling.py
示例8: set_input_space
def set_input_space(self, space):
    """ Note: this function will reset the parameters! """
    self.input_space = space
    if not isinstance(space, Conv2DSpace):
        raise BadInputSpaceError(self.__class__.__name__ +
                                 ".set_input_space "
                                 "expected a Conv2DSpace, got " +
                                 str(space) + " of type " +
                                 str(type(space)))
    rng = self.get_mlp().rng
    # Output shape: the padded formula takes precedence; otherwise fall
    # back to the classic 'valid'/'full' border-mode formulas.
    if self.pad != (0,0):
        output_shape = \
            [int(np.ceil((i_sh + 2. * k_pad - k_sh) / float(k_st))) + 1
             for i_sh, k_sh, k_st, k_pad in izip(self.input_space.shape,
                                                 self.kernel_shape,
                                                 self.kernel_stride,
                                                 self.pad)]
    elif self.border_mode == 'valid':
        output_shape = [(self.input_space.shape[0] - self.kernel_shape[0])
                        / self.kernel_stride[0] + 1,
                        (self.input_space.shape[1] - self.kernel_shape[1])
                        / self.kernel_stride[1] + 1]
    elif self.border_mode == 'full':
        output_shape = [(self.input_space.shape[0] + self.kernel_shape[0])
                        / self.kernel_stride[0] - 1,
                        (self.input_space.shape[1] + self.kernel_shape[1])
                        / self.kernel_stride[1] - 1]
    print "In:", self.layer_name, self.input_space.shape, self.kernel_shape, self.kernel_stride, self.pad
    print "Out:", self.layer_name, output_shape
    self.detector_space = Conv2DSpace(shape=output_shape,
                                      num_channels=self.output_channels,
                                      axes=('b', 'c', 0, 1))
    self.initialize_transformer(rng)
    W, = self.transformer.get_params()
    W.name = self.layer_name + '_W'
    # Only tied biases are supported here; the untied branch below is
    # unreachable because of this assert.
    assert self.tied_b
    if self.tied_b:
        self.b = sharedX(np.zeros((self.detector_space.num_channels)) +
                         self.init_bias)
    else:
        self.b = sharedX(self.detector_space.get_origin() + self.init_bias)
    self.b.name = self.layer_name + '_b'
    logger.info('Input shape: {0}'.format(self.input_space.shape))
    logger.info('Detector space: {0}'.format(self.detector_space.shape))
    self.initialize_output_space()
开发者ID:kastnerkyle,项目名称:facedet,代码行数:60,代码来源:corrVariable.py
示例9: set_input_space
def set_input_space(self, space):
    """Set the input space and allocate the factorized parameters:
    class weights V, transform U and its companion Ui, and Q."""
    self.input_space = space
    if not isinstance(space, Space):
        raise TypeError("Expected Space, got "+
                        str(space)+" of type "+str(type(space)))
    self.input_dim = space.get_total_dimension()
    self.needs_reformat = not isinstance(space, VectorSpace)
    desired_dim = self.input_dim
    self.desired_space = VectorSpace(desired_dim)
    if not self.needs_reformat:
        assert self.desired_space == self.input_space
    rng = self.mlp.rng
    self._params = []
    # Per-class weight rows, zero-initialized
    V = np.zeros((self.n_classes, self.input_dim),dtype=np.float32)
    self.V = sharedX(V, self.layer_name + "_V" )
    # NOTE(review): U lacks an explicit dtype (defaults to float64) while
    # its companions use float32; sharedX casts to floatX, so this is
    # inconsistent but presumably harmless -- confirm.
    U = np.identity( self.input_dim)
    self.U = sharedX(U, self.layer_name + "_U")
    Q = np.zeros((self.input_dim, self.input_dim),dtype=np.float32)
    self.Q = sharedX(Q, self.layer_name + "_Q")
    Ui = np.identity(self.input_dim,dtype=np.float32)
    self.Ui = sharedX(Ui, self.layer_name + "_Ui")
    self._params = [ self.U, self.Ui, self.V, self.Q]
开发者ID:tomsbergmanis,项目名称:pylearn2speech,代码行数:32,代码来源:factorized_layers.py
示例10: get_updates
def get_updates(self, learning_rate, grads, lr_scalers=None):
    """Build SGD updates that normalize each gradient by a running
    estimate of its standard deviation and apply momentum.

    For every parameter this maintains exponential moving averages of
    the gradient and the squared gradient; the gradient is divided by
    sqrt(E[g^2] - E[g]^2 + stabilizer) before the momentum step.
    Note: lr_scalers is accepted but unused here.
    """
    updates = OrderedDict()
    for param in grads.keys():
        # per-parameter state: momentum increment and the two averages
        inc = sharedX(param.get_value() * 0.)
        avg_grad = sharedX(np.zeros_like(param.get_value()))
        avg_grad_sqr = sharedX(np.zeros_like(param.get_value()))
        if param.name is not None:
            avg_grad.name = 'avg_grad_' + param.name
            avg_grad_sqr.name = 'avg_grad_sqr_' + param.name
        new_avg_grad = self.averaging_coeff * avg_grad \
            + (1 - self.averaging_coeff) * grads[param]
        new_avg_grad_sqr = self.averaging_coeff * avg_grad_sqr \
            + (1 - self.averaging_coeff) * grads[param]**2
        # variance estimate E[g^2] - E[g]^2; stabilizer guards sqrt(0)
        normalized_grad = grads[param] / T.sqrt(new_avg_grad_sqr -
                                                new_avg_grad**2 +
                                                self.stabilizer)
        updated_inc = self.momentum * inc - learning_rate * normalized_grad
        updates[avg_grad] = new_avg_grad
        updates[avg_grad_sqr] = new_avg_grad_sqr
        updates[inc] = updated_inc
        updates[param] = param + updated_inc
    return updates
开发者ID:heromoga2000,项目名称:ift6085,代码行数:34,代码来源:jych_code.py
示例11: set_input_space
def set_input_space(self, space):
    """Set spaces and allocate W, b and the per-batch code variable X.

    When ``fprop_code`` is True the layer's output space has ``dim``
    units (the code); otherwise it matches the input dimension
    (a reconstruction).
    """
    self.input_space = space
    if isinstance(space, VectorSpace):
        self.requires_reformat = False
        self.input_dim = space.dim
    else:
        self.requires_reformat = True
        self.input_dim = space.get_total_dimension()
        self.desired_space = VectorSpace(self.input_dim)
    if self.fprop_code==True:
        self.output_space = VectorSpace(self.dim)
    else:
        self.output_space = VectorSpace(self.input_dim)
    rng = self.mlp.rng
    W = rng.randn(self.input_dim, self.dim)
    self.W = sharedX(W.T, self.layer_name + '_W')
    self.transformer = MatrixMul(self.W)
    self.W, = self.transformer.get_params()
    b = np.zeros((self.input_dim,))
    self.b = sharedX(b, self.layer_name + '_b') # We need both to pass input_dim valid
    # small random init for the code, trained alongside W and b
    X = .001 * rng.randn(self.batch_size, self.dim)
    self.X = sharedX(X, self.layer_name + '_X')
    self._params = [self.W, self.b, self.X]
    self.state_below = T.zeros((self.batch_size, self.input_dim))
开发者ID:EderSantana,项目名称:mdpcn,代码行数:28,代码来源:dpcn.py
示例12: __init__
def __init__(self, dim, dim_hid, dim_cond, clamp_sigmoid=False, unroll_scan=1):
    """
    Conditional NADE: extends the parent NADE with weight matrices that
    map the conditioning variables to the visible and hidden biases.

    Parameters
    ----------
    dim : int
        Number of observed binary variables
    dim_hid : int
        Number of latent binary variables
    dim_cond : int
        Number of conditioning variables
    clamp_sigmoid : bool, optional
        WRITEME. Defaults to `False`.
    unroll_scan : int, optional
        WRITEME. Defaults to 1.
    """
    super(CNADE, self).__init__(dim=dim, dim_hid=dim_hid,
                                clamp_sigmoid=clamp_sigmoid,
                                unroll_scan=unroll_scan)
    self.dim_cond = dim_cond
    # Conditioning weights matrix for visible biases
    U_b_value = self._initialize_weights(self.dim_cond, self.dim)
    self.U_b = sharedX(U_b_value, 'U_b')
    # Conditioning weights matrix for hidden biases
    U_c_value = self._initialize_weights(self.dim_cond, self.dim_hid)
    self.U_c = sharedX(U_c_value, 'U_c')
开发者ID:pombredanne,项目名称:research,代码行数:27,代码来源:nade.py
示例13: __init__
def __init__(self, n_vis_units, n_hidden_units):
    """Autoencoder parameters: uniform-random weight matrix W plus
    zero-initialized hidden and reconstruction biases, with a vector
    input space of ``n_vis_units`` dimensions."""
    Model.__init__(self)
    w_init = np.random.uniform(size=(n_vis_units, n_hidden_units))
    hid_bias = np.zeros(n_hidden_units)
    rec_bias = np.zeros(n_vis_units)
    self._W = sharedX(w_init, 'W')
    self._b = sharedX(hid_bias, 'b')
    self._b_reconstruction = sharedX(rec_bias, 'b_reconstruction')
    self.input_space = VectorSpace(dim=n_vis_units)
开发者ID:consciousnesss,项目名称:learn_theano,代码行数:7,代码来源:custom_autoencoder_2.py
示例14: get_fixed_var_descr
def get_fixed_var_descr(self, model, X, Y):
    """
    .. todo::
        WRITEME

    Builds the shared "drop mask" variables used for inpainting-style
    training and registers an on_load_batch function that refreshes
    them (and, when present, the inference procedure's dropout masks)
    for every batch.
    """
    assert Y is not None
    batch_size = model.batch_size
    drop_mask_X = sharedX(model.get_input_space().get_origin_batch(batch_size))
    drop_mask_X.name = 'drop_mask'
    X_space = model.get_input_space()
    updates = OrderedDict()
    rval = FixedVarDescr()
    inputs=[X, Y]
    if not self.supervised:
        update_X = self.mask_gen(X, X_space = X_space)
    else:
        # supervised case also maintains a per-example mask over Y
        drop_mask_Y = sharedX(np.ones(batch_size,))
        drop_mask_Y.name = 'drop_mask_Y'
        update_X, update_Y = self.mask_gen(X, Y, X_space)
        updates[drop_mask_Y] = update_Y
        rval.fixed_vars['drop_mask_Y'] = drop_mask_Y
    if self.mask_gen.sync_channels:
        # one mask channel is broadcast across all input channels
        n = update_X.ndim
        assert n == drop_mask_X.ndim - 1
        update_X.name = 'raw_update_X'
        zeros_like_X = T.zeros_like(X)
        zeros_like_X.name = 'zeros_like_X'
        update_X = zeros_like_X + update_X.dimshuffle(0,1,2,'x')
        update_X.name = 'update_X'
    updates[drop_mask_X] = update_X
    rval.fixed_vars['drop_mask'] = drop_mask_X
    if hasattr(model.inference_procedure, 'V_dropout'):
        include_prob = model.inference_procedure.include_prob
        include_prob_V = model.inference_procedure.include_prob_V
        include_prob_Y = model.inference_procedure.include_prob_Y
        theano_rng = MRG_RandomStreams(2012+11+20)
        # inverted dropout: sampled masks are divided by the keep
        # probability so activation scale is preserved
        for elem in flatten([model.inference_procedure.V_dropout]):
            updates[elem] = theano_rng.binomial(p=include_prob_V, size=elem.shape, dtype=elem.dtype, n=1) / include_prob_V
        if "Softmax" in str(type(model.hidden_layers[-1])):
            # last layer is a softmax: its dropout mask uses include_prob_Y
            hid = model.inference_procedure.H_dropout[:-1]
            y = model.inference_procedure.H_dropout[-1]
            updates[y] = theano_rng.binomial(p=include_prob_Y, size=y.shape, dtype=y.dtype, n=1) / include_prob_Y
        else:
            hid = model.inference_procedure.H_dropout
        for elem in flatten(hid):
            updates[elem] = theano_rng.binomial(p=include_prob, size=elem.shape, dtype=elem.dtype, n=1) / include_prob
    rval.on_load_batch = [utils.function(inputs, updates=updates)]
    return rval
开发者ID:amishtal,项目名称:pylearn2,代码行数:60,代码来源:dbm.py
示例15: __init__
def __init__(self, scale_grads=1, target_scale=.1,
             discriminator_default_input_include_prob = 1.,
             discriminator_input_include_probs=None,
             discriminator_default_input_scale=1.,
             discriminator_input_scales=None,
             generator_default_input_include_prob = 1.,
             generator_default_input_scale=1.,
             inference_default_input_include_prob=None,
             inference_input_include_probs=None,
             inference_default_input_scale=1.,
             inference_input_scales=None,
             init_now_train_generator=True,
             ever_train_discriminator=True,
             ever_train_generator=True,
             ever_train_inference=True,
             no_drop_in_d_for_g=False,
             alternate_g = False,
             infer_layer=None,
             noise_both = 0.,
             g_eps = 0.,
             d_eps =0.):
    """Adversarial-training cost configuration.

    All constructor arguments are stored as attributes wholesale via
    ``locals()``; shared flags are then created so that training of the
    generator / discriminator / inference parts can be toggled at
    runtime.
    """
    self.__dict__.update(locals())
    del self.self
    # These allow you to dynamically switch off training parts.
    # If the corresponding ever_train_* is False, these have
    # no effect.
    self.now_train_generator = sharedX(init_now_train_generator)
    self.now_train_discriminator = sharedX(numpy.array(1., dtype='float32'))
    self.now_train_inference = sharedX(numpy.array(1., dtype='float32'))
开发者ID:HyoungWooPark,项目名称:adversarial,代码行数:29,代码来源:__init__.py
示例16: __init__
def __init__(self, nvis, nhid, num_S=0, init_W=None):
    """Model with weights W, lateral matrix S, and biases theta.

    Parameters
    ----------
    nvis : int
        Number of visible units (columns of W).
    nhid : int
        Number of hidden units (rows of W).
    num_S : int, optional
        0 or 1; when 1 the lateral matrix S is included in the
        trainable parameters.
    init_W : str, optional
        Path to a pickled model whose W initializes this model's W.
        NOTE(review): unpickling executes arbitrary code -- only load
        trusted files.
    """
    super(CMModel, self).__init__()
    self.nvis = nvis
    self.nhid = nhid
    self.num_S = num_S
    assert num_S in {0, 1}, "Currently only num_S == 0 or num_S == 1 is supported!"
    if init_W:
        # Fix: close the file handle deterministically (the original
        # leaked it via pickle.load(open(init_W, "rb"))).
        with open(init_W, "rb") as f:
            model = pickle.load(f)
        W = model.W.get_value()
        self.W = sharedX(W)
    else:
        self.W = sharedX(np.random.uniform(-1e-3, 1e-3, (nhid, nvis)))
    self.S = sharedX(np.random.uniform(-1e-3, 1e-3, (nhid, nhid)))
    self.theta = sharedX(np.zeros(nhid))
    if self.num_S > 0:
        self._params = [self.W, self.S, self.theta]
    else:
        self._params = [self.W, self.theta]
    self.input_space = VectorSpace(dim=nvis)
    self.output_space = VectorSpace(dim=nhid)
开发者ID:blilbo,项目名称:cm,代码行数:25,代码来源:cm.py
示例17: set_input_space
def set_input_space(self, space):
    """ Note: this resets parameters! """
    self.input_space = space
    if isinstance(space, VectorSpace):
        self.requires_reformat = False
        self.input_dim = space.dim
    else:
        self.requires_reformat = True
        self.input_dim = space.get_total_dimension()
        self.desired_space = VectorSpace(self.input_dim)
    # optionally the raw input is concatenated onto the output
    self.output_space = VectorSpace(self.dim + self.copy_input \
                                    * self.input_dim)
    rng = self.mlp.rng
    shape = (self.input_dim, self.dim)
    # delegate parameter creation to the pluggable initializer object
    self.b = sharedX(self.initializer.get_biases(rng, shape),
                     name=self.layer_name + '_b')
    self.W = sharedX(self.initializer.get_weights(rng, shape),
                     name=self.layer_name + '_W')
    self.mask = sharedX(self.initializer.get_mask(),
                        name=self.layer_name + '_mask')
开发者ID:nicholas-leonard,项目名称:pylearn2,代码行数:27,代码来源:initializer.py
示例18: get_updates
def get_updates(self, learning_rate, grads, lr_scalers=None):
    """RMSProp with momentum.

    Each parameter keeps a running average of its squared gradient and
    a momentum-style velocity; the gradient is scaled by the RMS (with
    a floor of ``self.stabilizer``) before the velocity update.
    ``lr_scalers`` is accepted for interface compatibility but unused.
    """
    updates = OrderedDict()
    coeff = self.averaging_coeff
    for param, grad in grads.items():
        value = param.get_value()
        ms = sharedX(np.zeros_like(value))
        vel = sharedX(np.zeros_like(value))
        if param.name is not None:
            ms.name = 'avg_grad_sqr_' + param.name
        ms_next = coeff * ms + (1 - coeff) * T.sqr(grad)
        # floor the RMS so the division stays numerically safe
        scale = T.maximum(T.sqrt(ms_next), self.stabilizer)
        vel_next = self.momentum * vel - learning_rate * (grad / scale)
        updates[ms] = ms_next
        updates[vel] = vel_next
        updates[param] = param + vel_next
    return updates
开发者ID:edamaraju,项目名称:nice,代码行数:26,代码来源:learning_rule.py
示例19: __init__
def __init__(self, W1, b1, W2, b2, mf_iter):
    """Wrap the two layers' weights and biases in shared variables and
    record the number of mean-field iterations."""
    self.mf_iter = mf_iter
    for attr, value in (('W1', W1), ('b1', b1), ('W2', W2), ('b2', b2)):
        setattr(self, attr, sharedX(value))
    self.dataset_yaml_src = "!obj:pylearn2.datasets.mnist.MNIST { which_set : train }"
开发者ID:cc13ny,项目名称:galatea,代码行数:7,代码来源:cRBM.py
示例20: __init__
def __init__(self, dataset, model, algorithm=None, save_path=None,
             save_freq=0, extensions=None, allow_overwrite=True):
    """Training loop configuration.

    Stores the dataset/model/algorithm, resolves the save path (from
    the argument or from PYLEARN2_* environment variables), attaches
    the dataset's yaml source to the model, and creates shared timing
    counters.

    Note: when ``save_path`` is None and ``save_freq`` is 0, no
    ``self.save_path`` is set (original behavior, preserved).
    """
    self.allow_overwrite = allow_overwrite
    self.first_save = True
    self.dataset = dataset
    self.model = model
    self.algorithm = algorithm
    if save_path is not None:
        if save_freq == 0:
            warnings.warn('save_path specified but save_freq is 0 '
                          '(never save). Is this intentional?')
        self.save_path = preprocess(save_path)
    else:
        if save_freq > 0:
            phase_variable = 'PYLEARN2_TRAIN_PHASE'
            if phase_variable in os.environ:
                # Fix: os.environ values are strings; '%d' on a string
                # raises TypeError, so convert to int first.
                phase = 'phase%d' % int(os.environ[phase_variable])
                tokens = [os.environ['PYLEARN2_TRAIN_FILE_FULL_STEM'],
                          phase, 'pkl']
            else:
                tokens = os.environ['PYLEARN2_TRAIN_FILE_FULL_STEM'], 'pkl'
            self.save_path = '.'.join(tokens)
    self.save_freq = save_freq
    if hasattr(self.dataset, 'yaml_src'):
        self.model.dataset_yaml_src = self.dataset.yaml_src
    else:
        warnings.warn("dataset has no yaml src, model won't know what " +
                      "data it was trained on")
    self.extensions = extensions if extensions is not None else []
    # shared timing counters updated by the training loop
    self.training_seconds = sharedX(value=0,
                                    name='training_seconds_this_epoch')
    self.total_seconds = sharedX(value=0, name='total_seconds_last_epoch')
开发者ID:Bowen-C,项目名称:pylearn2,代码行数:34,代码来源:train.py
注:本文中的pylearn2.utils.sharedX函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。
请发表评论