• 设为首页
  • 点击收藏
  • 手机版
    手机扫一扫访问
    迪恩网络手机版
  • 关注官方公众号
    微信扫一扫关注
    迪恩网络公众号

Python shortcuts.buildNetwork函数代码示例

原作者: [db:作者] 来自: [db:来源] 收藏 邀请

本文整理汇总了Python中pybrain.tools.shortcuts.buildNetwork函数的典型用法代码示例。如果您正苦于以下问题:Python buildNetwork函数的具体用法?Python buildNetwork怎么用?Python buildNetwork使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。



在下文中一共展示了buildNetwork函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。

示例1: __init__

 def __init__(self, hidden, **args):
     """Build a sigmoid-output action network sized to the agent's input
     grid and hand it to the ModuleMarioAgent base constructor."""
     self.setArgs(**args)
     # Two extra input units carry the special info channel when enabled.
     n_inputs = self.inGridSize**2 + (2 if self.useSpecialInfo else 0)
     net = buildNetwork(n_inputs, hidden, self.usedActions, outclass=SigmoidLayer)
     ModuleMarioAgent.__init__(self, net)
开发者ID:DioMuller,项目名称:ai-exercices,代码行数:7,代码来源:networkagent.py


示例2: __init__

 def __init__(self, num_features, num_actions, indexOfAgent=None):
     """Set up the Q-value and policy networks plus their backprop trainers."""
     PHC_FA.__init__(self, num_features, num_actions, indexOfAgent)
     hidden_units = num_features + num_actions
     # Linear-output Q network over concatenated (state, action) input.
     self.linQ = buildNetwork(num_features + num_actions, hidden_units, 1,
                              hiddenclass=SigmoidLayer, outclass=LinearLayer)
     # Sigmoid-output policy network mapping state -> action preferences.
     self.linPolicy = buildNetwork(num_features, hidden_units, num_actions,
                                   hiddenclass=SigmoidLayer, outclass=SigmoidLayer)
     self.averagePolicy = []
     self.trainer4LinQ = BackpropTrainer(self.linQ, weightdecay=self.weightdecay)
     self.trainer4LinPolicy = BackpropTrainer(self.linPolicy, weightdecay=self.weightdecay)
开发者ID:Snazz2001,项目名称:Multi-Agent-Reinforcement-Learning-in-Stochastic-Games,代码行数:7,代码来源:phc.py


示例3: buildCustomNetwork

 def buildCustomNetwork(self, hiddenLayers, train_faces):
     """Build a softmax feed-forward network with 1-3 hidden layers.

     hiddenLayers -- list of hidden-layer sizes; any length other than
                     1, 2, or 3 yields None.
     train_faces  -- dataset supplying indim/outdim for the network.
     """
     myfnn = None     
     print "building network..."
     # One buildNetwork call per supported depth; hidden-layer sizes are
     # passed positionally between the input and output dimensions.
     if len(hiddenLayers) == 1:
         myfnn = buildNetwork( 
           train_faces.indim, 
           hiddenLayers[0],
           train_faces.outdim, 
           outclass=SoftmaxLayer
         )
     elif len(hiddenLayers) == 2:
         myfnn = buildNetwork( 
           train_faces.indim, 
           hiddenLayers[0],
           hiddenLayers[1],
           train_faces.outdim, 
           outclass=SoftmaxLayer
         )
     elif len(hiddenLayers) == 3:
         myfnn = buildNetwork( 
           train_faces.indim, 
           hiddenLayers[0],
           hiddenLayers[1],
           hiddenLayers[2],
           train_faces.outdim, 
           outclass=SoftmaxLayer
         )
     return myfnn
开发者ID:mwebergithub,项目名称:face457b,代码行数:28,代码来源:supervised_facial_classifier.py


示例4: __init__

     def __init__(self, motion, memory, sonar, posture):
         """Wire up robot proxies and build the motor-babbling networks.

         motion/memory/sonar/posture -- NOTE(review): presumably NAOqi
         ALMotion/ALMemory/ALSonar/ALRobotPosture proxies (the code calls
         getLimits("Body") and subscribe) -- confirm against caller.
         """
         self.motionProxy = motion
         self.memoryProxy = memory
         self.sonarProxy = sonar
         self.postureProxy = posture
         self.useSensors    = True
         # 26 joint angles plus 18 other sensor channels.
         self.inputLength = 26+18
         self.outputLength = 26
         self.sonarProxy.subscribe("Closed-Loop Motor Babbling") # Start the sonar
         self.set_stiffness(0.3)
         # Main control network; layer sizes come from module-level constants.
         self.net = buildNetwork(INPUTSIZE,HIDDENSIZE,OUTPUTSIZE)

         # Hierarchical control networks (two extra levels).
         self.netH1 = buildNetwork(INPUTSIZE,HIDDENSIZE,OUTPUTSIZE)
         self.netH2 = buildNetwork(INPUTSIZE,HIDDENSIZE,OUTPUTSIZE)
         # Per-level sensor/motor memories, initialised to constant vectors.
         self.sMemory1 = np.array([1]*(INPUTSIZE + PREDICTSIZE))
         self.sMemory2 = np.array([1]*(INPUTSIZE + PREDICTSIZE))
         self.mMemory1 = np.array([0]*OUTPUTSIZE)
         self.mMemory2 = np.array([0]*OUTPUTSIZE)
         

         # Access global joint limits.
         self.Body = motion.getLimits("Body")
         self.bangles =  [1] * 26
         self.othersens = [2] * 18
         self.sMemory = np.array([1]*(INPUTSIZE + PREDICTSIZE))
         self.mMemory = np.array([0]*OUTPUTSIZE)
         self.cl = curiosityLoop()

         # Seed the RNG from the current wall-clock time.
         self.rand = Random()
         self.rand.seed(int(time()))

         # Initialize a model dictionary.
         self.models = dict()
开发者ID:ctf20,项目名称:DarwinianNeurodynamics,代码行数:34,代码来源:motorBabbling15.py


示例5: reset

  def reset(self, params, repetition):
    """Reconfigure one experiment run: encoders, dataset, and LSTM network.

    params -- dict with keys 'output_encoding', 'dataset', 'num_cells',
              'prediction_nstep' (as read below).
    """
    print params

    self.nDimInput = 3
    self.inputEncoder = PassThroughEncoder()

    # Output is either the raw scalar (pass-through) or a bucketed
    # likelihood distribution over scalar values.
    if params['output_encoding'] == None:
      self.outputEncoder = PassThroughEncoder()
      self.nDimOutput = 1
    elif params['output_encoding'] == 'likelihood':
      self.outputEncoder = ScalarBucketEncoder()
      self.nDimOutput = self.outputEncoder.encoder.n

    if params['dataset'] == 'nyc_taxi' or params['dataset'] == 'nyc_taxi_perturb_baseline':
      self.dataset = NYCTaxiDataset(params['dataset'])
    else:
      raise Exception("Dataset not found")

    self.testCounter = 0
    self.resets = []
    self.iteration = 0

    # initialize LSTM network
    # Fixed seed so repeated runs start from identical weights.
    random.seed(6)
    if params['output_encoding'] == None:
      self.net = buildNetwork(self.nDimInput, params['num_cells'], self.nDimOutput,
                         hiddenclass=LSTMLayer, bias=True, outputbias=True, recurrent=True)
    elif params['output_encoding'] == 'likelihood':
      self.net = buildNetwork(self.nDimInput, params['num_cells'], self.nDimOutput,
                         hiddenclass=LSTMLayer, bias=True, outclass=SigmoidLayer, recurrent=True)

    (self.networkInput, self.targetPrediction, self.trueData) = \
      self.dataset.generateSequence(
      prediction_nstep=params['prediction_nstep'],
      output_encoding=params['output_encoding'])
开发者ID:oxtopus,项目名称:nupic.research,代码行数:35,代码来源:suite.py


示例6: buildFNN

def buildFNN(testData, trainData):
    '''
    Input: testing data object, training data object
    Output: Prints details of best FNN
    '''
        
    accuracy=0
    model = None
    params = None
    fnn = buildNetwork( trainData.indim, (trainData.indim + trainData.outdim)/2, trainData.outdim, hiddenclass=TanhLayer, outclass=SoftmaxLayer, bias='true' )    
    trainer = BackpropTrainer(fnn, dataset=trainData, momentum=0.1, verbose=False, weightdecay=0.01)        
    a=calculateANNaccuracy(fnn, trainData, testData, trainer)    
    if a>accuracy:
        model=fnn
        accuracy=a
        params='''network = [Hidden Layer = TanhLayer; Hidden Layer Units= (Input+Output)Units/2; Output Layer = SoftmaxLayer]\n'''
    
    fnn = buildNetwork( trainData.indim, trainData.indim, trainData.outdim, hiddenclass=TanhLayer, outclass=SoftmaxLayer, bias='true' )
    trainer = BackpropTrainer(fnn, dataset=trainData, momentum=0.1, verbose=False, weightdecay=0.01)
    a=calculateANNaccuracy(fnn, trainData, testData, trainer)
    if a>accuracy:
        model=fnn
        accuracy=a
        params='''network = [Hidden Layer = TanhLayer; Hidden Layer Units = Input Units; Output Layer = SoftmaxLayer]\n'''
    
        
    fnn = buildNetwork( trainData.indim, (trainData.indim + trainData.outdim)/2, trainData.outdim, hiddenclass=TanhLayer, outclass=SigmoidLayer, bias='true' )
    trainer = BackpropTrainer(fnn, dataset=trainData, momentum=0.1, verbose=False, weightdecay=0.01)
    a=calculateANNaccuracy(fnn, trainData, testData, trainer)
    if a>accuracy:
        model=fnn
        accuracy=a
        params='''network = [Hidden Layer = TanhLayer; Hidden Layer Units = (Input+Output)Units/2; Output Layer = SigmoidLayer]\n'''
    
        
    fnn = buildNetwork( trainData.indim, (trainData.indim + trainData.outdim)/2, trainData.outdim, hiddenclass=TanhLayer, outclass=SigmoidLayer, bias='true' )
    trainer = BackpropTrainer(fnn, dataset=trainData, momentum=0.1, verbose=False, weightdecay=0.01)
    a=calculateANNaccuracy(fnn, trainData, testData, trainer)
    if a>accuracy:
        model=fnn
        accuracy=a
        params='''network = [Hidden Layer = TanhLayer; Hidden Layer Units = Input Units; Output Layer = SigmoidLayer]\n'''
    
    fnn = buildNetwork( trainData.indim, (trainData.indim + trainData.outdim)/2, (trainData.indim + trainData.outdim)/2, trainData.outdim, hiddenclass=TanhLayer, outclass=SoftmaxLayer, bias='true' )
    trainer = BackpropTrainer(fnn, dataset=trainData, momentum=0.1, verbose=False, weightdecay=0.01)
    a=calculateANNaccuracy(fnn, trainData, testData, trainer)
    if a>accuracy:
        model=fnn
        accuracy=a
        params='''network = [TWO (2) Hidden Layers = TanhLayer; Hidden Layer Units = (Input+Output)Units/2; Output Layer = SoftmaxLayer]\n'''
    
        
    print '\nThe best model had '+str(accuracy)+'% accuracy and used the parameters:\n'+params+'\n'
开发者ID:aplassard,项目名称:Image_Processing,代码行数:53,代码来源:ann.py


示例7: __init__

    def __init__(self, prev=5):
        """Create paired datasets, networks, and error logs for the treble
        ('t') and bass ('b') voices."""
        # timsig beat, timsig denom, prev + curr dur/freq, prev 3 chords, bass note
        n_inputs = (prev + 1) * 2 + 4
        for voice in ('t', 'b'):
            setattr(self, voice + '_ds', SupervisedDataSet(n_inputs, 2))
            setattr(self, voice + '_net', buildNetwork(n_inputs, 50, 75, 25, 2))
            setattr(self, voice + '_freq_err', [])
            setattr(self, voice + '_dur_err', [])

        self.prev = prev
        self.corpus = []
开发者ID:ijoosong,项目名称:classical-ml,代码行数:14,代码来源:NeuralNetwork.py


示例8: __init__

    def __init__(self, array=None):
        """Create an 18-18-9 network, optionally seeded with parameters.

        array -- optional flat parameter vector for the network.

        BUG FIX: the None check now uses ``is None`` instead of ``== None``;
        with a numpy parameter array, ``== None`` performs an elementwise
        comparison and raises on truth-testing.
        """
        self.net = buildNetwork(18, 18, 9)
        if array is None:
            self.theta = self.net.params
        else:
            self.theta = array
            # NOTE(review): writes pybrain's private _params directly; the
            # public API is _setParameters -- confirm this is intentional.
            self.net._params = self.theta
开发者ID:Chuphay,项目名称:python,代码行数:14,代码来源:tic_tac.py


示例9: reset

    def reset(self):
        """Reinstall a fresh linear (no hidden layer) network and an empty
        supervised dataset."""
        FA.reset(self)

        # Single-layer net indim -> outdim with bias; weights drawn N(0, 0.1).
        self.network = buildNetwork(self.indim, self.outdim, bias=True)
        initial_weights = random.normal(0, 0.1, self.network.params.shape)
        self.network._setParameters(initial_weights)
        self.pybdataset = SupervisedDataSet(self.indim, self.outdim)


示例10: train_net

    def train_net(self,training_times_input=100,num_neroun=200,learning_rate_input=0.1,weight_decay=0.1,momentum_in = 0,verbose_input=True):
        '''
        The main function to train the network

        Builds a single-hidden-layer regression net (sigmoid hidden,
        linear output) and runs backprop for training_times_input epochs,
        printing train/test error after each epoch.
        '''
        print self.trndata['input'].shape
        # NOTE(review): raw_input() blocks until the user presses Enter --
        # presumably a debugging pause; confirm it is intentional.
        raw_input()
        self.network=buildNetwork(self.trndata.indim,
                                  num_neroun,self.trndata.outdim,
                                  bias=True,
                                  hiddenclass=SigmoidLayer,
                                  outclass = LinearLayer)
        # verbose_input is accepted but the trainer is hard-wired to verbose=True.
        self.trainer=BackpropTrainer(self.network,
                                     dataset=self.trndata,
                                     learningrate=learning_rate_input,
                                     momentum=momentum_in,
                                     verbose=True,
                                     weightdecay=weight_decay )

        for iter in range(training_times_input):
            print "Training", iter+1,"times"
            self.trainer.trainEpochs(1)
            trn_error = self._net_performance(self.network, self.trndata)
            tst_error = self._net_performance(self.network, self.tstdata)
            print "the trn error is: ", trn_error
            print "the test error is: ",tst_error

        '''prediction on all data:'''
开发者ID:DajeRoma,项目名称:clicc-flask,代码行数:27,代码来源:regression.py


示例11: run

    def run(self, fold, X_train, y_train, X_test, y_test):
        """Train and evaluate one cross-validation fold.

        Returns the test error as a fraction in [0, 1].
        """
        DS_train, DS_test = ClassificationData.convert_to_DS(
            X_train,
            y_train,
            X_test,
            y_test)

        # Pick the hidden-layer size that scored best on the training data.
        NHiddenUnits = self.__get_best_hu(DS_train)
        fnn = buildNetwork(
            DS_train.indim,
            NHiddenUnits,
            DS_train.outdim,
            outclass=SoftmaxLayer,
            bias=True)

        trainer = BackpropTrainer(
            fnn,
            dataset=DS_train,
            momentum=0.1,
            verbose=False,
            weightdecay=0.01)

        trainer.trainEpochs(self.epochs)
        # percentError returns a percentage, hence the final /100 rescale.
        tstresult = percentError(
            trainer.testOnClassData(dataset=DS_test),
            DS_test['class'])

        print "NN fold: %4d" % fold, "; test error: %5.2f%%" % tstresult
        return tstresult / 100.0


示例12: neuralNetwork_eval_func

 def neuralNetwork_eval_func(self, chromosome):
     """Fitness function: mean cross-validated AUC for a decoded chromosome.

     Decodes (node_num, learning_rate, window_size), returns a cached
     score when one is logged, otherwise trains one sigmoid-output network
     per fold and averages AUC / decision value / MCC across folds.
     """
     node_num, learning_rate, window_size = self.decode_chromosome(chromosome)
     # Reuse previously logged results for identical hyper-parameters.
     if self.check_log(node_num, learning_rate, window_size):
         return self.get_means_from_log(node_num, learning_rate, window_size)[0]
     folded_dataset = self.create_folded_dataset(window_size)
     # 21 features per position over a symmetric window of 2*w+1 positions.
     indim = 21 * (2 * window_size + 1)
     mean_AUC = 0
     mean_decision_value = 0
     mean_mcc = 0
     sample_size_over_thousand_flag = False
     for test_fold in xrange(self.fold):
         test_labels, test_dataset, train_labels, train_dataset = folded_dataset.get_test_and_training_dataset(test_fold)
         # For large samples, evaluate only a single fold to bound runtime.
         if len(test_labels) + len(train_labels) > 1000:
             sample_size_over_thousand_flag = True
         ds = SupervisedDataSet(indim, 1)
         for i in xrange(len(train_labels)):
             ds.appendLinked(train_dataset[i], [train_labels[i]])
         net = buildNetwork(indim, node_num, 1, outclass=SigmoidLayer, bias=True)
         trainer = BackpropTrainer(net, ds, learningrate=learning_rate)
         trainer.trainUntilConvergence(maxEpochs=self.maxEpochs_for_trainer)
         # activate() returns a 1-element vector; unwrap to scalars.
         decision_values = [net.activate(test_dataset[i]) for i in xrange(len(test_labels))]
         decision_values = map(lambda x: x[0], decision_values)
         AUC, decision_value_and_max_mcc = validate_performance.calculate_AUC(decision_values, test_labels)
         mean_AUC += AUC
         mean_decision_value += decision_value_and_max_mcc[0]
         mean_mcc += decision_value_and_max_mcc[1]
         if sample_size_over_thousand_flag:
             break
     # Only average when every fold actually ran.
     if not sample_size_over_thousand_flag:
         mean_AUC /= self.fold
         mean_decision_value /= self.fold
         mean_mcc /= self.fold
     # Persist and cache the scores so future generations skip retraining.
     self.write_log(node_num, learning_rate, window_size, mean_AUC, mean_decision_value, mean_mcc)
     self.add_log(node_num, learning_rate, window_size, mean_AUC, mean_decision_value, mean_mcc)
     return mean_AUC
开发者ID:clclcocoro,项目名称:MLwithGA,代码行数:35,代码来源:cross_validation.py


示例13: setUp

 def setUp(self):
   """Build a 4-6-3 tanh network and attach an astrocyte layer to its
   hidden layer for the tests."""
   self.nn = buildNetwork(4, 6, 3, bias=False,
                          hiddenclass=TanhLayer, outclass=TanhLayer)
   self.nn.sortModules()
   # The input module has exactly one outgoing connection; unpack it.
   outgoing = self.nn.connections[self.nn['in']]
   self.in_to_hidden, = outgoing
   self.hiddenAstroLayer = AstrocyteLayer(self.nn['hidden0'], self.in_to_hidden)


示例14: createNN

def createNN(indim, hiddim, outdim):
    """Return a sorted feed-forward network (tanh hidden and output, no
    bias) with the given layer sizes."""
    network = buildNetwork(indim, hiddim, outdim, bias=False,
                           hiddenclass=TanhLayer, outclass=TanhLayer)
    network.sortModules()
    return network


示例15: train

def train(data):
    """Train a feed-forward classifier on *data*.

    See http://www.pybrain.org/docs/tutorial/fnn.html

    Parameters:
      data - A ClassificationDataSet for training.
             Should not include the test data.

    Returns a neural network trained on the test data.
    """
    # Architecture knobs live here; alternatives are documented at
    # http://www.pybrain.org/docs/api/tools.html
    net = buildNetwork(data.indim, 5, data.outdim,
                       hiddenclass=SigmoidLayer,
                       outclass=SoftmaxLayer)

    # Trainer options can be tuned as well:
    # http://www.pybrain.org/docs/api/supervised/trainers.html
    trainer = BackpropTrainer(net, dataset=data)
    trainer.trainUntilConvergence(maxEpochs=20)

    return net
开发者ID:IPPETAD,项目名称:ProjectSmiley,代码行数:26,代码来源:neural_net_learner.py


示例16: nn_1

    def nn_1(self):
        """Train a small (3 hidden unit) neural network model end to end.

        Builds a pybrain network, fits it through a local adapter object,
        scores the CV and test sets, writes a submission file, and stores
        the model under self.models['nn'].
        """
        logging.info('Beginning Neural Network model.')
        
        class ThisNN(): # Used to abstract away fit function
            def __init__(self, nn, kg):
                self.nn = nn
                self.kg = kg
                
            def fit(self, X, Y):
                """Train self.nn on (X, Y): 10 single epochs, then until convergence."""
                logging.info('Generating a Pybrain SupervisedDataSet')
                ds = SupervisedDataSet(X,Y)
                trainer = BackpropTrainer(self.nn,ds)
                for i in range(0,10):
                    logging.debug(trainer.train()) # XXX Runs once
                logging.info('Training Neural Network until Convergence')

                # [:,1:] drops the leading column -- presumably an ID column,
                # matching the "X - 1 to avoid ID" sizing below.
                cv = SupervisedDataSet(self.kg.X_cv[:,1:],self.kg.Y_cv[:,1:])
                trainer.trainUntilConvergence(verbose=11, validationData=cv, trainingData=ds)
            
            def predict_x(self, X):
                """Return the network's activation for each row of X as an array."""
                Y = []
                for i in range(0,X.shape[0]):
                    Y.append(self.nn.activate(X[i,:]))
                return np.asarray(Y)

        net = buildNetwork(self.X_train.shape[1] - 1,3,1) # X - 1 to avoid ID
        this_nn = ThisNN(net,self) 
        self.__fit(net,this_nn.fit) 
        self.__score_cv(net,this_nn.predict_x)        
        self.__score_test(net,this_nn.predict_x)
        self.predict_y_submission(this_nn.predict_x)
        self.write_submission('nn.csv')
        self.models['nn'] = net
        logging.info('Completed Neural Network model.')
        return net
开发者ID:supertetelman,项目名称:Kaggle,代码行数:35,代码来源:kaggle_data.py


示例17: trainNetwork

def trainNetwork(inData, numOfSamples, numOfPoints, epochs):
    """Build, train, and evaluate a softmax classifier on RGB data.

    Returns the trained feed-forward network.
    """
    # Build the dataset
    alldata = createRGBdataSet(inData, numOfSamples, numOfPoints)
    # Split into test and training data
    trndata, tstdata = splitData(alldata)

    # Report  stats
    print "Number of training patterns: ", len(trndata)
    print "Input and output dimensions: ", trndata.indim, trndata.outdim
    print "First sample (input, target, class):"
    print trndata['input'][0], trndata['target'][0], trndata['class'][0]

    # Build and train the network (fixed 256-unit hidden layer).
    fnn = buildNetwork( trndata.indim, 256, trndata.outdim, outclass=SoftmaxLayer )
    trainer = BackpropTrainer( fnn, dataset=trndata, momentum=0.001, verbose=True, weightdecay=0.001)
    #trainer.trainEpochs( epochs )
    trainer.trainUntilConvergence(maxEpochs=epochs)

    # Report train/test percent error
    trnresult = percentError( trainer.testOnClassData(), trndata['class'] )
    tstresult = percentError( trainer.testOnClassData( dataset=tstdata ), tstdata['class'] )
    print "epoch: %4d" % trainer.totalepochs, \
      "  train error: %5.2f%%" % trnresult, \
      "  test error: %5.2f%%" % tstresult

    # Report results of final network
    checkNeuralNet(trainer, alldata, numOfSamples)
    return fnn
开发者ID:johnesquivel,项目名称:RaspVoiceRecog,代码行数:28,代码来源:buildModel.py


示例18: fit

    def fit(self, X, y):
        """Build the network and dataset from self.layers, load (X, y)
        samples, and run self.improve() to train."""
        self.nn = buildNetwork(*self.layers, bias=True, hiddenclass=SigmoidLayer)

        # Dataset dimensions follow the first and last layer sizes.
        self.ds = SupervisedDataSet(self.layers[0], self.layers[-1])
        for idx, sample in enumerate(X):
            self.ds.addSample(sample.tolist(), y[idx])
        self.improve()
开发者ID:crcollins,项目名称:ML,代码行数:7,代码来源:neural.py


示例19: trainNetwork

 def trainNetwork(self,proportion = 0):        
     """Train self.fnn on self.alldata.

     proportion -- percentage (0-100) of samples held out as a test set;
                   0 trains on everything and skips test reporting.
     """
     if proportion != 0:
         tstdata, trndata = self.alldata.splitWithProportion( 0.01*proportion )
     else:
         trndata = self.alldata
     # One-of-many (one-hot) targets are required for classification training.
     trndata._convertToOneOfMany( )
     if proportion != 0:
         tstdata._convertToOneOfMany( )
     print "Number of training patterns: ", len(trndata)
     print "Input and output dimensions: ", trndata.indim, trndata.outdim
     self.fnn = buildNetwork( trndata.indim, self.hidden_layer_size, trndata.outdim, 
                              hiddenclass=SigmoidLayer,outclass=SoftmaxLayer )
     self.trainer = BackpropTrainer( self.fnn, dataset=trndata, momentum=0.1, verbose=True, weightdecay=0.01)
     # One epoch at a time so per-epoch error can be reported.
     for i in range(self.iterations_number):
         self.trainer.trainEpochs( 1 )
         trnresult = percentError( self.trainer.testOnClassData(),
                                   trndata['class'] )
         if proportion != 0:
             tstresult = percentError( self.trainer.testOnClassData(
                dataset=tstdata ), tstdata['class'] )
     
         if proportion != 0:
             print "epoch: %4d" % self.trainer.totalepochs, \
               "  train error: %5.2f%%" % trnresult, \
               "  test error: %5.2f%%" % tstresult
         else:
             print "epoch: %4d" % self.trainer.totalepochs, \
               "  train error: %5.2f%%" % trnresult
开发者ID:mcopik,项目名称:PyGestures,代码行数:28,代码来源:neural_network.py


示例20: parse_and_train

 def parse_and_train(self):
     """Load training pairs from self.file, build the network, and train.

     The file is expected to hold alternating input/output lines (blank
     lines are skipped). NOTE(review): an odd number of non-blank lines
     would raise IndexError at learn_lines[i+1] -- confirm input format.
     """
     f = open(self.file,'r')
     learn_lines = []
     for line in f:
         if line.strip() != '':
             learn_lines.append(line)
     i = 0
     f.close()
     # Consume lines pairwise: even index = input, odd index = target.
     while i < len(learn_lines):
         ins, outs = self.convert_to_tuple(learn_lines[i],learn_lines[i+1])
         i += 2
         self.ds.addSample(ins,outs)
     # Two hidden layers: self.hns units, then a fixed 25-unit layer.
     self.nn = buildNetwork(self.ios,self.hns,25,self.ios)
     #self.train_dat, self.test_dat = self.ds.splitWithProportion(0.75)
     self.train_dat = self.ds
     trnr = BackpropTrainer(self.nn,dataset=self.train_dat,momentum=0.1,verbose=False,weightdecay=0.01)
     # Train an initial 150 epochs, then 50-epoch increments with progress
     # reports until self.epochs is reached.
     i = 150
     trnr.trainEpochs(150)
     while i < self.epochs:
         trnr.trainEpochs(50)
         i += 50
         print 'For epoch ' + str(i)
         print 'For train:'
         self.print_current_error()
         #print 'For test:'
         #self.print_validation()
     self.nn.sortModules()
开发者ID:iforneri,项目名称:EmpathicaNLP,代码行数:27,代码来源:nlp_nn.py



注:本文中的pybrain.tools.shortcuts.buildNetwork函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。


鲜花

握手

雷人

路过

鸡蛋
该文章已有0人参与评论

请发表评论

全部评论

专题导读
上一篇:
Python networkreader.NetworkReader类代码示例发布时间:2022-05-25
下一篇:
Python networkwriter.NetworkWriter类代码示例发布时间:2022-05-25
热门推荐
阅读排行榜

扫描微信二维码

查看手机版网站

随时了解更新最新资讯

139-2527-9053

在线客服(服务时间 9:00~18:00)

在线QQ客服
地址:深圳市南山区西丽大学城创智工业园
电邮:jeky_zhao#qq.com
移动电话:139-2527-9053

Powered by 互联科技 X3.4© 2001-2023 极客世界.|Sitemap