
Python datasets.fetch_olivetti_faces Function Code Examples


This article collects typical usage examples of the Python function sklearn.datasets.fetch_olivetti_faces. If you are wondering what fetch_olivetti_faces does, how to call it, or what real-world usage looks like, the curated examples below should help.



Twenty code examples of the fetch_olivetti_faces function are shown below, sorted by popularity by default.
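Before the collected examples, here is a minimal sketch of the function's basic contract (shapes as documented by scikit-learn; the first call downloads the dataset to the local scikit-learn cache):

from sklearn.datasets import fetch_olivetti_faces

# Downloads on first call (cached afterwards); returns a Bunch object.
faces = fetch_olivetti_faces(shuffle=True, random_state=0)

print(faces.data.shape)    # (400, 4096) - each 64x64 image ravelled to a row
print(faces.images.shape)  # (400, 64, 64) - the same images, unflattened
print(faces.target.shape)  # (400,) - subject ids 0..39, 10 images per subject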

Example 1: get_olivetti_faces

import numpy as np
from sklearn.datasets import fetch_olivetti_faces

def get_olivetti_faces():
    faces = fetch_olivetti_faces()
    # Cast to the dtypes the caller's numeric/GPU code expects
    faces.data = faces.data.astype(np.float32)
    faces.target = faces.target.astype(np.int32)

    return faces.data, faces.target
Developer: ToraxXx, Project: gsdr, Lines: 7, Source: util.py


Example 2: main

from sklearn import datasets as data

def main():
    nComponents = 50
    # Import a dataset for testing
    Faces = data.fetch_olivetti_faces()
    Images = Faces.images
    trainData = Images[:100, :, :]
    testData = Images[100:, :, :]
    # Produce a low-dimensional representation
    # (reduceDim is defined elsewhere in the source file)
    lowDimTrainData, lowDimTestData = reduceDim(trainData, testData,
                                                nComponents)
Developer: potachen, Project: COS424_Project, Lines: 10, Source: nmf.py
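The reduceDim helper is not part of the excerpt. Since the source file is nmf.py, a plausible sketch using scikit-learn's NMF might look like the following; the name, signature, and flattening step are assumptions, not the original code:

import numpy as np
from sklearn.decomposition import NMF

def reduceDim(trainData, testData, nComponents):
    # Hypothetical reconstruction: flatten the (n, 64, 64) image stacks to
    # (n, 4096) matrices, fit NMF on the training set, then project both sets.
    trainFlat = trainData.reshape(len(trainData), -1)
    testFlat = testData.reshape(len(testData), -1)
    model = NMF(n_components=nComponents, init='nndsvda', max_iter=400)
    lowDimTrain = model.fit_transform(trainFlat)
    lowDimTest = model.transform(testFlat)
    return lowDimTrain, lowDimTest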


Example 3: data_processing_olivetti

import os
from numpy import array
from sklearn.datasets import fetch_olivetti_faces
# "m" is the project's serialization module (used as m.dump below),
# i.e. an alias along the lines of `import pickle as m` in the original source.

def data_processing_olivetti():
    """
    Python function for importing the Olivetti data set.
    """
    dataset = fetch_olivetti_faces()
    faces = dataset.data
    n_samples, n_features = faces.shape
    class_indices = dataset['target']

    train_set = []
    train_class_indices = []
    train_batches = []
    test_set = []
    test_class_indices = []
    test_batches = []

    # Each subject has 10 consecutive images: the first 7 go to the
    # training set, the remaining 3 to the test set.
    curr_idx_count = 0
    batch_count_train = 0
    batch_count_test = 0
    for i in range(len(class_indices)):
        if curr_idx_count <= 6:
            train_set.append(faces[i].reshape((1, len(faces[i]))))
            train_class_indices.append(array([class_indices[i]]))
            train_batches.append(batch_count_train)
            batch_count_train += 1
        elif curr_idx_count <= 9:
            test_set.append(faces[i].reshape((1, len(faces[i]))))
            test_class_indices.append(array([class_indices[i]]))
            test_batches.append(batch_count_test)
            batch_count_test += 1
        if curr_idx_count == 9:
            curr_idx_count = -1

        curr_idx_count += 1

    train_path = "output/train/bag_of_words"
    os.makedirs(train_path)
    m.dump(array(train_batches), open(os.path.join(train_path, "batches.p"), "wb"))
    for i in range(len(train_set)):
        m.dump(train_set[i], open(os.path.join(train_path, "bow_batch_" + str(train_batches[i])) + ".p", "wb"))
        m.dump(train_class_indices[i], open(os.path.join(train_path, "class_indices_batch_" + str(train_batches[i])) + ".p", "wb"))

    test_path = "output/test/bag_of_words"
    os.makedirs(test_path)
    m.dump(array(test_batches), open(os.path.join(test_path, "batches.p"), "wb"))
    for i in range(len(test_set)):
        m.dump(test_set[i], open(os.path.join(test_path, "bow_batch_" + str(test_batches[i])) + ".p", "wb"))
        m.dump(test_class_indices[i], open(os.path.join(test_path, "class_indices_batch_" + str(test_batches[i])) + ".p", "wb"))
Developer: vseledkin, Project: Deep-Belief-Nets-for-Topic-Modeling, Lines: 51, Source: data_processing_olivetti.py
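To illustrate the on-disk layout this produces, here is a hedged sketch of reading one batch back, assuming "m" is pickle-compatible as noted above:

import os
import pickle

train_path = "output/train/bag_of_words"
# batches.p holds the array of batch ids written above
batches = pickle.load(open(os.path.join(train_path, "batches.p"), "rb"))
# each batch is a (1, 4096) row vector plus a one-element class-index array
x0 = pickle.load(open(os.path.join(train_path, "bow_batch_0.p"), "rb"))
y0 = pickle.load(open(os.path.join(train_path, "class_indices_batch_0.p"), "rb"))
print(x0.shape, y0)  # (1, 4096) [0]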


Example 4: get_data

import numpy as np
import scipy as sp
import scipy.io
import scipy.sparse
from sklearn.datasets import (fetch_lfw_people, fetch_20newsgroups_vectorized,
                              fetch_olivetti_faces, fetch_rcv1,
                              make_low_rank_matrix, make_sparse_uncorrelated)
# fetch_mldata (used in the fallback branch) was removed in scikit-learn 0.24.
# CIFAR_FOLDER, SVHN_FOLDER, unpickle, handle_missing_dataset and random_state
# are module-level names defined elsewhere in the benchmark script.

def get_data(dataset_name):
    print("Getting dataset: %s" % dataset_name)

    if dataset_name == 'lfw_people':
        X = fetch_lfw_people().data
    elif dataset_name == '20newsgroups':
        X = fetch_20newsgroups_vectorized().data[:, :100000]
    elif dataset_name == 'olivetti_faces':
        X = fetch_olivetti_faces().data
    elif dataset_name == 'rcv1':
        X = fetch_rcv1().data
    elif dataset_name == 'CIFAR':
        if handle_missing_dataset(CIFAR_FOLDER) == "skip":
            return
        X1 = [unpickle("%sdata_batch_%d" % (CIFAR_FOLDER, i + 1))
              for i in range(5)]
        X = np.vstack(X1)
        del X1
    elif dataset_name == 'SVHN':
        if handle_missing_dataset(SVHN_FOLDER) == 0:
            return
        X1 = sp.io.loadmat("%strain_32x32.mat" % SVHN_FOLDER)['X']
        X2 = [X1[:, :, :, i].reshape(32 * 32 * 3) for i in range(X1.shape[3])]
        X = np.vstack(X2)
        del X1
        del X2
    elif dataset_name == 'low rank matrix':
        # int(...) replaces the np.int alias removed from recent numpy
        X = make_low_rank_matrix(n_samples=500, n_features=int(1e4),
                                 effective_rank=100, tail_strength=.5,
                                 random_state=random_state)
    elif dataset_name == 'uncorrelated matrix':
        X, _ = make_sparse_uncorrelated(n_samples=500, n_features=10000,
                                        random_state=random_state)
    elif dataset_name == 'big sparse matrix':
        sparsity = int(1e6)
        size = int(1e6)
        small_size = int(1e4)
        data = np.random.normal(0, 1, int(sparsity / 10))
        data = np.repeat(data, 10)
        row = np.random.uniform(0, small_size, sparsity)
        col = np.random.uniform(0, small_size, sparsity)
        X = sp.sparse.csr_matrix((data, (row, col)), shape=(size, small_size))
        del data
        del row
        del col
    else:
        X = fetch_mldata(dataset_name).data
    return X
Developer: 0664j35t3r, Project: scikit-learn, Lines: 48, Source: bench_plot_randomized_svd.py


Example 5: task4

import numpy as np
from sklearn.datasets import fetch_olivetti_faces
from sklearn.decomposition import PCA
# cos and write_answer_4 are helpers defined elsewhere in the source file.

def task4():
    data = fetch_olivetti_faces(shuffle=True, random_state=0).data
    image_shape = (64, 64)
    # The original used the long-removed RandomizedPCA; in current
    # scikit-learn the equivalent is PCA with the randomized solver.
    model = PCA(n_components=10, svd_solver='randomized')
    model.fit(data)
    data_new = model.transform(data)
    mean_components = [data_new[:, i].mean() for i in range(data_new.shape[1])]
    influence = np.zeros((data_new.shape[0], data_new.shape[1]))
    for i in range(data_new.shape[0]):
        for j in range(data_new.shape[1]):
            influence[i, j] = cos(data_new[i, :], mean_components, np.abs(data_new[i, j]), mean_components[j])
    res = []
    for i in range(influence.shape[1]):
        res.append(np.argmax(influence[:, i]))
    print(res)
    write_answer_4(res)
Developer: astarostin, Project: MachineLearningSpecializationCoursera, Lines: 16, Source: task1.py


Example 6: __init__

    def __init__(self, batch_size, max_patches=50, patch_size=(20, 20), images_num=None, rng=None):
        from sklearn import datasets as sklearn_datasets
        from sklearn.feature_extraction.image import extract_patches_2d

        self._train_batch_size = batch_size
        self._test_batch_size = batch_size

        rng = rng if rng is not None else np.random.RandomState(12)

        faces = sklearn_datasets.fetch_olivetti_faces()
        images_num = images_num if images_num is not None else faces.images.shape[0]

        x_v = np.zeros((max_patches * images_num, patch_size[0] * patch_size[1]))
        classes = np.zeros((max_patches * images_num,))

        # Extract max_patches random patches from each face image, store them
        # as flattened rows of x_v, labelled with the face's subject id.
        for img_id, img in enumerate(faces.images):
            if img_id >= images_num:
                break

            patches_id = ((img_id * max_patches), ((img_id + 1) * max_patches))

            x_v[patches_id[0]:patches_id[1], :] = extract_patches_2d(
                img,
                patch_size,
                max_patches=max_patches,
                random_state=rng
            ).reshape((max_patches, patch_size[0] * patch_size[1]))

            classes[patches_id[0]:patches_id[1]] = faces.target[img_id]

        # one_hot_encode is a helper defined elsewhere in the source file.
        y_v = one_hot_encode(classes)

        test_prop = x_v.shape[0] // 5  # computed but unused in this excerpt

        self._xt_v = x_v
        self._yt_v = y_v

        self._x_v = x_v
        self._y_v = y_v
        self._i = 0

        # Standardize the patches, then rescale.
        self._x_v -= np.mean(self._x_v, axis=0)
        self._x_v /= np.std(self._x_v, axis=0)
        self._x_v *= 0.1
Developer: alexeyche, Project: alexeyche-junk, Lines: 44, Source: datasets.py
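The one_hot_encode helper is not shown in the excerpt; a minimal sketch consistent with how it is called (the name and exact behavior are assumptions):

import numpy as np

def one_hot_encode(classes):
    # Hypothetical reconstruction: map an array of integer class labels
    # to an (n_samples, n_classes) one-hot matrix.
    classes = classes.astype(int)
    n_classes = classes.max() + 1
    out = np.zeros((len(classes), n_classes))
    out[np.arange(len(classes)), classes] = 1.0
    return out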


Example 7: init_features

    # Assumes np (numpy), PCA (sklearn.decomposition), fetch_olivetti_faces,
    # DUMMY_PATH, self.redis and self._n_components are defined elsewhere.
    def init_features(self):
        if self.feature_coef_ is None:
            self.feature_coef_ = self.redis.get("feature_coef")
        if self.feature_coef_ is None:
            pca = PCA(self._n_components)
            test_faces = fetch_olivetti_faces()
            features = np.array(pca.fit_transform(test_faces.data),
                                dtype=np.float32)
            self.redis.set("name:0", "olivetti_faces")
            self.redis.set("name_id:olivetti_faces", 0)
            feature_coef = np.array(pca.components_.T, np.float64)
            dim1, dim2 = feature_coef.shape
            self.redis.hmset("feature_coef",
                             {"dim1": dim1, "dim2": dim2,
                              "data": feature_coef.tostring()})
            test_features = [f.tostring() for f in features]
            self.redis.rpush("features", *test_features)
            test_face_data = [np.array(f, dtype=np.float32).tostring() for f in test_faces.data]
            self.redis.rpush("faces", *test_face_data)
            for i in range(len(test_faces.data)):
                self.redis.hmset("picture:%d" % (i),
                                 {"name_id": 0, "pic_path": DUMMY_PATH})
            self.redis.set("last_pic_id", len(test_faces.data) - 1)
Developer: lucidfrontier45, Project: PyFace, Lines: 23, Source: RedisFaceRecognizer.py


Example 8: fetch_olivetti_faces

import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces

if __name__ == "__main__":

    # Overview:
    # Olivetti dataset
    # Split into test and training
    # extract keypoints and compute SIFT features on training images
    # cluster SIFT features into a visual dictionary of size V
    # represent each image as a visual-words histogram
    # apply tf-idf (needs text-like data)
    # fit an LDA topic model on the bags of visual words
    # given test data, transform each test image into a tf-idf vector
    # use cosine similarity for image retrieval
    # display the top-K images

    # Load the faces datasets
    data = fetch_olivetti_faces(shuffle=True, random_state=0)
    targets = data.target

    data = data.images.reshape((len(data.images), -1))
    data_train = data[targets < 30]
    data_test = data[targets >= 30]
    num_train_images = data_train.shape[0]

    # show mean training image
    plt.figure()
    plt.imshow(np.mean(data_train, axis=0).reshape(64, 64))
    plt.title('Olivetti Dataset (Mean Training Image)')
    plt.show()

    # show random selection of images (the excerpt ends here)
    rnd_idx = np.arange(num_train_images)
Developer: vsmolyakov, Project: cv, Lines: 31, Source: visual_words.py
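The overview above describes a bag-of-visual-words pipeline. As a rough sketch of the dictionary and histogram steps, using random patch descriptors in place of SIFT, everything below is an illustrative assumption rather than the project's code:

import numpy as np
from sklearn.cluster import MiniBatchKMeans
from sklearn.feature_extraction.image import extract_patches_2d

def bow_histograms(images, V=100, patch_size=(8, 8), rng=0):
    # Collect local descriptors (random patches stand in for SIFT here).
    descs = [extract_patches_2d(img, patch_size, max_patches=50,
                                random_state=rng).reshape(50, -1)
             for img in images]
    # Cluster all descriptors into a visual dictionary of size V.
    kmeans = MiniBatchKMeans(n_clusters=V, random_state=rng)
    kmeans.fit(np.vstack(descs))
    # Represent each image as a histogram over its visual words.
    return np.array([np.bincount(kmeans.predict(d), minlength=V)
                     for d in descs])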


Example 9: test2

    def test2(self):

        import logging
        from time import time
        import matplotlib.pyplot as plt
        from numpy.random import RandomState
        from sklearn import decomposition
        from sklearn.datasets import fetch_olivetti_faces

        # Display progress logs on stdout
        logging.basicConfig(level=logging.INFO,
                            format='%(asctime)s %(levelname)s %(message)s')
        n_row, n_col = 2, 3
        n_components = n_row * n_col
        image_shape = (64, 64)
        rng = RandomState(0)

        ###############################################################################
        # Load faces data
        dataset = fetch_olivetti_faces(shuffle=True, random_state=rng)
        faces = dataset.data

        n_samples, n_features = faces.shape

        # global centering
        faces_centered = faces - faces.mean(axis=0)

        print('faces_centered has shape %s' % (faces_centered.shape,))

        # local centering
        faces_centered -= faces_centered.mean(axis=1).reshape(n_samples, -1)

        print("Dataset consists of %d faces" % n_samples)
        print("each face has %d features" % n_features)

        # List of the different estimators, whether to center and transpose the
        # problem, and whether the transformer uses the clustering API.
        estimators = [

            ('Independent components - FastICA',
             decomposition.FastICA(n_components=n_components, whiten=True),
             True),

        ]

        ###############################################################################
        # Plot a sample of the input data
        # (self.plotGallery is a plotting helper defined on the class)
        self.plotGallery("First centered Olivetti faces", faces_centered[:n_components])

        ###############################################################################
        # Do the estimation and plot it
        for name, estimator, center in estimators:
            print("Extracting the top %d %s..." % (n_components, name))
            t0 = time()
            data = faces
            if center:
                data = faces_centered
            estimator.fit(data)
            train_time = (time() - t0)
            print("done in %0.3fs" % train_time)
            if hasattr(estimator, 'cluster_centers_'):
                components_ = estimator.cluster_centers_
            else:
                components_ = estimator.components_
            if hasattr(estimator, 'noise_variance_'):
                self.plotGallery("Pixelwise variance",
                                 estimator.noise_variance_.reshape(1, -1), n_col=1,
                                 n_row=1)
            self.plotGallery('%s - Train time %.1fs' % (name, train_time),
                             components_[:n_components])

        plt.show()
Developer: Taohong01, Project: OCR, Lines: 69, Source: pcaocr.py


Example 10: fetch_olivetti_faces

from sklearn.datasets import fetch_olivetti_faces
from sklearn.datasets import fetch_lfw_people
from sklearn.datasets import get_data_home


if __name__ == "__main__":
    fetch_olivetti_faces()

    print("Loading Labeled Faces Data (~200MB)")
    fetch_lfw_people(min_faces_per_person=70, resize=0.4)
    print("=> Success!")
    print("Data saved in %s" % get_data_home())
Developer: JeanKossaifi, Project: workshop_python, Lines: 12, Source: fetch_data.py


Example 11: RandomState

import logging

import matplotlib.pyplot as plt
from numpy.random import RandomState

from sklearn.datasets import fetch_olivetti_faces
from sklearn.cluster import MiniBatchKMeans
from sklearn import decomposition

# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s %(levelname)s %(message)s')
n_row, n_col = 2, 3
n_components = n_row * n_col
image_shape = (64, 64)
rng = RandomState(0)

###############################################################################
# Load faces data
dataset = fetch_olivetti_faces(shuffle=True, random_state=rng)
faces = dataset.data

n_samples, n_features = faces.shape

# global centering
faces_centered = faces - faces.mean(axis=0)

# local centering
faces_centered -= faces_centered.mean(axis=1).reshape(n_samples, -1)

print("Dataset consists of %d faces" % n_samples)


###############################################################################
def plot_gallery(title, images, n_col=n_col, n_row=n_row):
Developer: Hydroinformatics-UNESCO-IHE, Project: scikit-learn, Lines: 30, Source: plot_faces_decomposition.py
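The excerpt cuts off at the plot_gallery definition. In the upstream scikit-learn faces-decomposition example, the body continues roughly as follows; treat this as a sketch reproduced from that example, not the verbatim file:

def plot_gallery(title, images, n_col=n_col, n_row=n_row):
    plt.figure(figsize=(2. * n_col, 2.26 * n_row))
    plt.suptitle(title, size=16)
    for i, comp in enumerate(images):
        plt.subplot(n_row, n_col, i + 1)
        # Symmetric color limits so positive and negative weights compare fairly
        vmax = max(comp.max(), -comp.min())
        plt.imshow(comp.reshape(image_shape), cmap=plt.cm.gray,
                   interpolation='nearest', vmin=-vmax, vmax=vmax)
        plt.xticks(())
        plt.yticks(())
    plt.subplots_adjust(0.01, 0.05, 0.99, 0.93, 0.04, 0.)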


Example 12: loadData

from sklearn.datasets import fetch_olivetti_faces

def loadData():
    data = fetch_olivetti_faces()
    targets = data.target
    # Note: returns the whole Bunch as `data`, not just the pixel matrix.
    return data, targets
Developer: AkiraKane, Project: Python, Lines: 4, Source: face_completion_with_a_multi-output_estimators.py


Example 13: OnlineLearningTest01

def OnlineLearningTest01():
    import time

    import matplotlib.pyplot as plt
    import numpy as np

    from sklearn import datasets
    from sklearn.cluster import MiniBatchKMeans
    from sklearn.feature_extraction.image import extract_patches_2d

    faces = datasets.fetch_olivetti_faces()

    print("Learning the dictionary...")
    rng = np.random.RandomState(0)

    kmeans = MiniBatchKMeans(n_clusters=81, random_state=rng, verbose=True)
    patch_size = (20, 20)

    buffer = []
    t0 = time.time()

    # Online learning: cycle over the images 6 times, feeding mini-batches
    index = 0

    for _ in range(6):
        for img in faces.images:
            data = extract_patches_2d(img, patch_size, max_patches=50, random_state=rng)
            data = np.reshape(data, (len(data), -1))

            buffer.append(data)
            index += 1
            if index % 10 == 0:
                # merge the buffered patch arrays into one matrix
                data = np.concatenate(buffer, axis=0)

                # standardize before fitting
                data -= np.mean(data, axis=0)
                data /= np.std(data, axis=0)
                kmeans.partial_fit(data)  # incremental learning step
                buffer = []

            if index % 100 == 0:
                print("Partial fit of %4i out of %i" % (index, 6 * len(faces.images)))

    dt = time.time() - t0
    print("done in %.2fs. " % dt)

    # plot result
    plt.figure(figsize=(4.2, 4))
    for i, patch in enumerate(kmeans.cluster_centers_):
        plt.subplot(9, 9, i + 1)
        plt.imshow(patch.reshape(patch_size), cmap=plt.cm.gray, interpolation="nearest")

        plt.xticks(())
        plt.yticks(())  # the original repeated xticks(()) here

    plt.suptitle('Patches of faces\nTrain time %.1fs on %d patches'
                 % (dt, 6 * len(faces.images)), fontsize=16)  # 6 passes, matching the loop
    plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)

    plt.show()
Developer: hyliu0302, Project: scikit-learn-notes, Lines: 62, Source: myScikitLearnFcns.py


Example 14: face_completion_Test01

def face_completion_Test01():
    import numpy as np
    import matplotlib.pyplot as plt

    from sklearn.datasets import fetch_olivetti_faces
    from sklearn.utils.validation import check_random_state

    from sklearn.ensemble import ExtraTreesRegressor
    from sklearn.neighbors import KNeighborsRegressor
    from sklearn.linear_model import LinearRegression
    from sklearn.linear_model import RidgeCV

    # load the faces datasets
    data = fetch_olivetti_faces()
    targets = data.target

    # print(len(data.data))
    # print(len(data.data[0]))  # data.data is a 400 x 4096 array

    # The 4096 features are the ravelled 64x64 image.
    # face = data.data[1].reshape(64, 64)  # note data vs. images
    # face = data.images[1]
    # face_ccw_90 = list(zip(*face))[::-1]
    # face_cw_90 = list(zip(*face[::-1]))

    # plt.imshow(face_cw_90, cmap=plt.cm.gray_r)
    # plt.show()

    # To predict the left half from the right half instead, one could first
    # rotate every image by 90 degrees:
    # for i in range(len(data.images)):
    #     face = data.images[i]
    #     data.images[i] = face_cw_90 = list(zip(*face[::-1]))

    # equivalent to data.data: flatten each image into a row vector
    data = data.images.reshape((len(data.images), -1))
    # print(len(data[0]))

    train = data[targets < 30]
    test = data[targets >= 30]  # the targets are not used beyond this split

    n_faces = 5
    rng = check_random_state(4)

    # test.shape = [100, 4096]
    face_ids = rng.randint(test.shape[0], size=(n_faces,))  # 5 random indices in [0, 99]
    test = test[face_ids, :]

    # print(face_ids)

    n_pixels = data.shape[1]
    # Indices must be ints in Python 3, hence the int(...) around ceil/floor.
    X_train = train[:, :int(np.ceil(0.5 * n_pixels))]   # upper half of the face
    Y_train = train[:, int(np.floor(0.5 * n_pixels)):]  # lower half of the face
    # Predict the lower half from the upper half: a multi-output regression;
    # train and test inputs have the same dimensionality.
    X_test = test[:, :int(np.ceil(0.5 * n_pixels))]
    Y_test = test[:, int(np.floor(0.5 * n_pixels)):]

    # Completion is a regression problem, not classification.
    # ESTIMATORS is a dict of named regressors.
    ESTIMATORS = {
        "Extra trees": ExtraTreesRegressor(n_estimators=10, max_features=32, random_state=0),
        "k-nn": KNeighborsRegressor(),
        "Linear regression": LinearRegression(),
        "Ridge": RidgeCV(),
    }

    # fit each estimator, then predict
    print("start fitting and predicting")
    y_test_predict = dict()
    for name, estimator in ESTIMATORS.items():
        estimator.fit(X_train, Y_train)
        y_test_predict[name] = estimator.predict(X_test)

    print("start plotting")

    # plot the results
    image_shape = (64, 64)

    n_cols = 1 + len(ESTIMATORS)
    plt.figure(figsize=(2.0 * n_cols, 2.26 * n_faces))
    plt.suptitle("Face completion with multi-output estimators GoGoGo", size=16)

    for i in range(n_faces):
        true_face = np.hstack((X_test[i], Y_test[i]))

        if i:
            sub = plt.subplot(n_faces, n_cols, i * n_cols + 1)
        else:
            sub = plt.subplot(n_faces, n_cols, i * n_cols + 1, title="true faces")

        sub.axis("off")

        sub.imshow(true_face.reshape(image_shape), cmap=plt.cm.gray, interpolation="nearest")

# ......... part of the code omitted .........
Developer: hyliu0302, Project: scikit-learn-notes, Lines: 101, Source: myScikitLearnFcns.py
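The omitted tail of this function, in the upstream scikit-learn face-completion example it is modeled on, continues inside the `for i in range(n_faces)` loop roughly like this; a sketch from that example, not the author's exact code:

        for j, est in enumerate(sorted(ESTIMATORS)):
            # glue the true upper half to each estimator's predicted lower half
            completed_face = np.hstack((X_test[i], y_test_predict[est][i]))
            if i:
                sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j)
            else:
                sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j, title=est)
            sub.axis("off")
            sub.imshow(completed_face.reshape(image_shape),
                       cmap=plt.cm.gray, interpolation="nearest")

    plt.show()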


Example 15: get_olive

from sklearn import datasets

def get_olive():
    olive = datasets.fetch_olivetti_faces()
    return olive.data, olive.target
Developer: jamesfisk, Project: thesisc, Lines: 3, Source: neural.py


Example 16: get_data

from sklearn import datasets

def get_data():
    face_data = datasets.fetch_olivetti_faces()
    # face_data = datasets.load_iris()
    data = face_data.data
    target = face_data.target
    return data, target
Developer: xieydd, Project: xieydd-s-respository, Lines: 6, Source: face-recognition_svm.py
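Given the source file name (face-recognition_svm.py), here is a plausible way these arrays could feed an SVM classifier, shown purely as an illustrative sketch rather than the project's code:

from sklearn.model_selection import train_test_split
from sklearn.svm import SVC

data, target = get_data()
# Stratify so each of the 40 subjects appears in both splits
X_train, X_test, y_train, y_test = train_test_split(
    data, target, test_size=0.25, random_state=0, stratify=target)

clf = SVC(kernel='linear', C=1.0)  # a linear kernel is a common choice at 4096 dims
clf.fit(X_train, y_train)
print("test accuracy: %.3f" % clf.score(X_test, y_test))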


Example 17: RandomState

from numpy.random import RandomState
import matplotlib.pyplot as plt
import numpy as np

from sklearn.datasets import fetch_olivetti_faces
from sklearn import decomposition

from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
# The original imported ShuffleSplit from sklearn.cross_validation,
# which was removed in scikit-learn 0.20.
from sklearn.model_selection import ShuffleSplit
# -- Prepare data and define utility functions ---------------------------------

image_shape = (64, 64)
rng = RandomState(0)

# Load faces data
dataset = fetch_olivetti_faces(data_home='/tmp/', shuffle=True, random_state=rng)
faces = dataset.data


n_samples, n_features = faces.shape

# global centering
faces_centered = faces - faces.mean(axis=0, dtype=np.float64)

print("Dataset consists of %d faces" % n_samples)
print("********************************")

def plot_gallery(title, images, n_col, n_row):
    plt.figure(figsize=(2. * n_col, 2.26 * n_row))
    plt.suptitle(title, size=16)
    for i, comp in enumerate(images):
Developer: ykacer, Project: CES_Data_Scientist_2016, Lines: 31, Source: pca_nmf_faces.py


Example 18: download_data

# __FILENAME__ = download_data (file marker from the scraped multi-file source)
"""
Run this script to make sure data is cached in the appropriate
place on your computer.

The data are only a few megabytes, but conference wireless is
often not very reliable...
"""
import os
import sys
from sklearn import datasets

#------------------------------------------------------------
# Faces data: this will be stored in the scikit_learn_data
#             sub-directory of your home folder
faces = datasets.fetch_olivetti_faces()
print("Successfully fetched olivetti faces data")

#------------------------------------------------------------
# SDSS galaxy data: this will be stored in notebooks/datasets/data
sys.path.append(os.path.abspath('notebooks'))
from datasets import fetch_sdss_galaxy_mags
colors = fetch_sdss_galaxy_mags()
print("Successfully fetched SDSS galaxy data")


#------------------------------------------------------------
# SDSS filters & vega spectrum: stored in notebooks/figures/downloads
from figures.sdss_filters import fetch_filter, fetch_vega_spectrum
spectrum = fetch_vega_spectrum()
print("Successfully fetched vega spectrum")
Developer: Mondego, Project: pyreco, Lines: 31, Source: allPythonContent.py


Example 19: f

import pylab
import pickle
import numpy
import theano
import theano.tensor as T
from theano.tensor.signal import downsample
from theano.tensor.nnet import conv
from logistic_sgd import LogisticRegression, load_data
from mlp import HiddenLayer
from convolutional_mlp import LeNetConvPoolLayer
from sklearn import datasets

# load the saved model (legacy Theano code; downsample/conv are old-API modules)
layer0, layer1, layer2, layer3 = pickle.load(open('weight.pkl', 'rb'))

face = datasets.fetch_olivetti_faces(shuffle=True)
x = face.data[0, :]
x = x.reshape(1, 1, 64, 64)

input = T.tensor4(name='input')

conv_out = conv.conv2d(input, filters=layer0.params[0])
pooled_out = downsample.max_pool_2d(
    input=conv_out,
    ds=(2, 2),
    ignore_border=True
)
output = T.tanh(pooled_out + layer0.params[1].dimshuffle('x', 0, 'x', 'x'))
f = theano.function([input], output)
filtered_img = f(x)
pylab.gray()
pylab.subplot(1, 3, 1)
Developer: Coderx7, Project: CNN, Lines: 31, Source: loaddraw.py

Example 20: load_faces

from sklearn import datasets

def load_faces():
    X = datasets.fetch_olivetti_faces()
    # Convert the pixel values to float64. Assigning to .dtype, as the
    # original did (X.data.dtype = 'float64'), only reinterprets the raw
    # float32 buffer and corrupts the data; astype performs a real conversion.
    X.data = X.data.astype('float64')
    return (NATURAL, X)  # NATURAL is a constant defined elsewhere in the source
Developer: kushalarora, Project: ManifoldAlgorithms, Lines: 4, Source: load_datasets.py
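A quick standalone illustration of why the original in-place dtype assignment is unsafe (a demo, not from the project):

import numpy as np

a = np.ones(4, dtype=np.float32)  # 16 bytes of float32 data
b = a.copy()

a.dtype = 'float64'               # reinterprets the same 16 bytes (like a.view)
print(a.shape, a)                 # (2,) with nonsense values

b = b.astype('float64')           # proper element-wise conversion
print(b.shape, b)                 # (4,) [1. 1. 1. 1.]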



Note: The sklearn.datasets.fetch_olivetti_faces examples in this article were compiled by 纯净天空 from GitHub/MSDocs and other source-code and documentation platforms. The snippets were selected from open-source projects by various contributors; copyright remains with the original authors, and distribution and use are subject to each project's license. Do not republish without permission.

