This article collects typical usage examples of Python's scipy.io.mmread function. If you have been wondering what exactly mmread does, how to call it, or what it looks like in real code, the curated examples below should help.
The following presents 20 code examples of the mmread function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python examples.
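Before the collected examples, here is a minimal round-trip sketch (the file name ratings.mtx and the toy matrix are made up for illustration): mmread parses a MatrixMarket .mtx file and returns a scipy.sparse.coo_matrix for sparse input (an ndarray for dense input), so a conversion such as .tocsr() or .todense() usually follows, as it does in most of the examples below.

from scipy.io import mmread, mmwrite
from scipy.sparse import random as sparse_random

A = sparse_random(100, 50, density=0.05, format='coo')  # toy sparse matrix
mmwrite('ratings.mtx', A)    # write it out in MatrixMarket format
B = mmread('ratings.mtx')    # read it back; sparse input yields a coo_matrix
C = B.tocsr()                # convert for efficient row slicing / arithmetic
print(C.shape, C.nnz)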
Example 1: eigenbase
def eigenbase (h1, d1, E, v0, pow0, pow1, rest):
    # compute all eigenvalues and eigenvectors
    pt0 = 'out/impacting-bar/MK_%g_%g_%g_%g_%d_%d'%(h1, d1, E, v0, pow0, pow1)
    sl0 = SOLFEC ('DYNAMIC', 1E-3, pt0)
    bl0 = BULK_MATERIAL (sl0, model = 'KIRCHHOFF', young = E, poisson = PoissonRatio, density = MassDensity)
    bod = BODY (sl0, 'FINITE_ELEMENT', COPY (mesh), bl0)
    eval = [] # selected eigenvalue list
    evec = [] # selected eigenvector list (BODY command takes a tuple (eval, evec) argument for the RO formulation)
    vsel = (0,1,2,3,4,5,13,18,25,33,38)
    if 0:
        BODY_MM_EXPORT (bod, pt0+'/M.mtx', pt0+'/K.mtx')
        M = mmread (pt0+'/M.mtx').todense()
        K = mmread (pt0+'/K.mtx').todense()
        for j in range (0, K.shape[1]):
            for i in range (j+1, K.shape[0]):
                K [j, i] = K [i, j] # above diagonal = below diagonal
        x, y = eigh (K, M) # this produces y.T M y = 1 and y.T K y = x
        for j in vsel:
            eval.append (x[j].real)
            for z in y[:,j]:
                evec.append (z.real)
    else:
        data0 = MODAL_ANALYSIS (bod, 45, pt0 + '/modal.data', verbose = 'ON', abstol = 1E-14)
        ndofs = mesh.nnod * 3
        for j in vsel:
            eval.append (data0[0][j])
            for k in range (j*ndofs,(j+1)*ndofs):
                evec.append (data0[1][k])
    return (eval, evec)
Developer: KonstantinosKr, Project: solfec, Lines: 29, Source: impacting-bar.py
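The if 0: branch above is the SciPy route this page revolves around: BODY_MM_EXPORT writes M and K in MatrixMarket form (apparently with only one triangle of K filled in, hence the mirroring loop), mmread loads them back, and scipy.linalg.eigh solves the generalized eigenproblem. The normalization noted in the comment, y.T M y = 1, can be checked on a toy system; the 2x2 matrices below are made up for illustration.

import numpy as np
from scipy.linalg import eigh

K = np.array([[2.0, -1.0], [-1.0, 2.0]])  # toy symmetric stiffness matrix
M = np.diag([1.0, 3.0])                   # toy positive-definite mass matrix

x, y = eigh(K, M)  # generalized problem: K y = x M y
print(np.allclose(y.T.dot(M).dot(y), np.eye(2)))   # True: eigenvectors are mass-orthonormal
print(np.allclose(y.T.dot(K).dot(y), np.diag(x)))  # True: K is diagonalized to the eigenvalues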
Example 2: ro0_modal_base
def ro0_modal_base (use_scipy=False, verbose='OFF'):
    sol = ro0_model (1E-3, 0.0)
    bod = sol.bodies[0]
    eval = [] # selected eigenvalue list
    evec = [] # selected eigenvector list
    vsel = (0,1,2,3,4,5,13,18,25,33,38)
    if use_scipy:
        BODY_MM_EXPORT (bod, 'out/reduced-order0/M.mtx',
                        'out/reduced-order0/K.mtx')
        M = mmread ('out/reduced-order0/M.mtx').todense()
        K = mmread ('out/reduced-order0/K.mtx').todense()
        for j in range (0, K.shape[1]):
            for i in range (j+1, K.shape[0]):
                K [j, i] = K [i, j] # above diagonal = below diagonal
        x, y = eigh (K, M) # this produces y.T M y = 1 and y.T K y = x
        for j in vsel:
            eval.append (x[j].real)
            for z in y[:,j]:
                evec.append (z.real)
    else:
        data0 = MODAL_ANALYSIS (bod, 45, 'out/reduced-order0/modal',
                                1E-13, 1000, verbose)
        dofs = len(bod.velo)
        for j in vsel:
            eval.append (data0[0][j])
            for k in range (j*dofs,(j+1)*dofs):
                evec.append (data0[1][k])
    return (eval, evec)
Developer: tkoziara, Project: solfec, Lines: 29, Source: ro0-lib.py
Example 3: applySVMWithPCA
def applySVMWithPCA():
    '''
    Same as the previous function; just change the file names.
    '''
    data = io.mmread(ROOTDIR+"TRAINDATA.mtx")
    label = np.load(ROOTDIR+"label_train.npy")
    testdata = io.mmread(ROOTDIR+"TESTDATA.mtx")
    testLabel = np.load(ROOTDIR + "label_test.npy")
    linear_svm = LinearSVC(C=1.0, class_weight=None, loss='hinge', dual=True, fit_intercept=True,
                           intercept_scaling=1, multi_class='ovr', penalty='l2',
                           random_state=None, tol=0.0001, verbose=1, max_iter=2000)
    data = scale(data, with_mean=False)
    linear_svm.fit(data, label)
    joblib.dump(linear_svm, ROOTDIR+'originalTrain_hinge_2000.pkl')
    # linear_svm = joblib.load(ROOTDIR+'originalTrain_hinge_2000.pkl')
    print 'Training Done!'
    scr = linear_svm.score(data, label)
    print 'accuracy on the training set is: ' + str(scr)
    predLabel = linear_svm.predict(data)
    calcualteRMSE(label, predLabel)
    scr = linear_svm.score(testdata, testLabel)
    print 'accuracy on the testing set is: ' + str(scr)
    predLabel = linear_svm.predict(testdata)
    calcualteRMSE(testLabel, predLabel)
Developer: cyinv, Project: 10601Project-KDD2010, Lines: 31, Source: Preprocessing.py
Example 4: read_input_tensor
def read_input_tensor(headers_filename, data_file_names, tensor_slices, adjustDim=False, offerString="Attr: OFFER",
                      wantString="Attr: WANT"):
    # load the header file
    _log.info("Read header input file: " + headers_filename)
    input = codecs.open(headers_filename,'r',encoding='utf8')
    headers = input.read().splitlines()
    input.close()
    # get the largest dimension of all slices
    if adjustDim:
        maxDim = 0
        for data_file in data_file_names:
            matrix = mmread(data_file)
            if maxDim < matrix.shape[0]:
                maxDim = matrix.shape[0]
            if maxDim < matrix.shape[1]:
                maxDim = matrix.shape[1]
    # load the data files
    slice = 0
    tensor = SparseTensor(headers, offerString, wantString)
    for data_file in data_file_names:
        if adjustDim:
            adjusted = adjust_mm_dimension(data_file, maxDim)
            if adjusted:
                _log.warn("Adjusted dimension to (%d,%d) of matrix file: %s" % (maxDim, maxDim, data_file))
        _log.info("Read as slice %d the data input file: %s" % (slice, data_file))
        matrix = mmread(data_file)
        tensor.addSliceMatrix(matrix, tensor_slices[slice])
        slice = slice + 1
    return tensor
Developer: FedericoMarroni, Project: wonpreprocessing, Lines: 32, Source: tensor_utils.py
Example 5: get_debug
def get_debug(data):
    full_train = sio.mmread('data/%s_train.mtx' % data).tocsr()
    (nu, nm) = full_train.shape
    print 'sampling'
    debug_mids = sample(range(nm), nm / 5)
    debug_uids = sample(range(nu), nu / 5)
    debug = full_train[debug_uids][:, debug_mids].tocoo()
    nr = debug.nnz
    train_ids, _, test_ids = sample_split(nr)
    # build matrix from given indices
    print 'writing debug_train'
    debug_train = coo_matrix(
        (debug.data[train_ids], (debug.row[train_ids], debug.col[train_ids])), debug.shape)
    sio.mmwrite('data/%s_debug_train.mtx' % data, debug_train)
    print 'writing debug_test'
    debug_test = coo_matrix(
        (debug.data[test_ids], (debug.row[test_ids], debug.col[test_ids])), debug.shape)
    sio.mmwrite('data/%s_debug_test.mtx' % data, debug_test)
    # build movie mtx from debug_mids
    print 'movie debug'
    movies = sio.mmread('data/movies.mtx').tocsr()
    movies_debug = movies[debug_mids]
    sio.mmwrite('data/movies_%s_debug.mtx' % data, movies_debug)
    return debug, debug_train, debug_test, movies_debug
Developer: supasorn, Project: LinkPrediction, Lines: 29, Source: lib_transform.py
Example 6: load
def load(ppt, samples, l_tau, l_lc, l_regtype, b_tau, b_lc, b_regtype):
    ln = np.loadtxt('lin-models/bestlinwtln'+l_regtype+samples+'tau'+l_tau+'lc'+l_lc+ppt+'.txt')
    lv = np.loadtxt('lin-models/bestlinwtlv'+l_regtype+samples+'tau'+l_tau+'lc'+l_lc+ppt+'.txt')
    bv = np.loadtxt('bil-models/bestbilwtbn'+b_regtype+samples+'tau'+b_tau+'eta'+b_lc+ppt+'.txt')
    bn = np.loadtxt('bil-models/bestbilwtbv'+b_regtype+samples+'tau'+b_tau+'eta'+b_lc+ppt+'.txt')
    traindata = [(d.strip().split()[1:5], d.strip().split()[5]) for d in open('clean/cleantrain.txt')]
    devdata = [(d.strip().split()[1:5], d.strip().split()[5]) for d in open('clean/cleandev.txt')]
    testdata = [(d.strip().split()[1:5], d.strip().split()[5]) for d in open('clean/cleantest.txt')]
    traindata = traindata[:int(samples)]
    phih = sio.mmread('clean/trh1k.mtx')
    phim = sio.mmread('clean/trm1k.mtx')
    phidh = sio.mmread('clean/devh1k.mtx')
    phidm = sio.mmread('clean/devm1k.mtx')
    maph = np.loadtxt('clean/forhead.txt', dtype=str)
    mapm = np.loadtxt('clean/formod.txt', dtype=str)
    mapdh = np.loadtxt('clean/devheads.txt', dtype=str)
    mapdm = np.loadtxt('clean/devmods.txt', dtype=str)
    trainingdat = bilme.BilinearMaxentFeatEncoding.train(traindata, phih, phim, maph, mapm, pptype=ppt)
    traintoks = trainingdat.train_toks()
    traintokens = [(co.word_features(t),l) for t,l in trainingdat.train_toks()]
    devencode = bilme.BilinearMaxentFeatEncoding.train(devdata, phidh, phidm, mapdh, mapdm, pptype=ppt)
    devtoks = devencode.train_toks()
    devtokens = [(co.word_features(t),l) for t,l in devencode.train_toks()]
    data = [devtoks, devtokens]
    trlinencoding = maxent.BinaryMaxentFeatureEncoding.train(traintokens)
    return trlinencoding, devencode, [ln, lv], [bn, bv], data
Developer: f00barin, Project: bilppattach, Lines: 35, Source: combine.py
Example 7: generate_valid_repos_and_times
def generate_valid_repos_and_times(dataset_dir):
    """Function called to generate VALID_REPOS_AND_TIMES in `dataset_dir`
    """
    valid_repos_and_times = []
    repos_users_times_fn = join(dataset_dir, TIMED_INTERESTS_FN)
    u_r_t = mmread(repos_users_times_fn).transpose().tocsr()
    validation_repos_fn = join(dataset_dir, VALIDATING_FN)
    validation_matrix = mmread(validation_repos_fn).tocsr()
    v_u_r_t = u_r_t.multiply(validation_matrix).tolil()
    for uidx in xrange(v_u_r_t.shape[0]):
        v_r_t_coo = v_u_r_t.getrowview(uidx).tocoo()
        sorted_index = np.argsort(v_r_t_coo.data)
        times = v_r_t_coo.data[sorted_index]
        repos = v_r_t_coo.col[sorted_index]
        valid_repos_and_times.append(np.vstack((times,repos)))
    pt_fn = join(dataset_dir, VALID_REPOS_AND_TIMES)
    with open(pt_fn, "wb") as pf:
        cPickle.dump(valid_repos_and_times, pf, cPickle.HIGHEST_PROTOCOL)
    return pt_fn
Developer: fenekku, Project: Masters, Lines: 25, Source: prediction_times.py
Example 8: main
def main():
    import os
    import logging
    import subprocess
    from optparse import OptionParser
    import numpy as np
    from scipy.io import mmread
    from mrec import save_recommender
    from mrec.mf.recommender import MatrixFactorizationRecommender
    from filename_conventions import get_modelfile
    logging.basicConfig(level=logging.INFO,format='[%(asctime)s] %(levelname)s: %(message)s')
    parser = OptionParser()
    parser.add_option('--factor_format',dest='factor_format',help='format of factor files tsv | mm (matrixmarket) | npy (numpy array)')
    parser.add_option('--user_factors',dest='user_factors',help='user factors filepath')
    parser.add_option('--item_factors',dest='item_factors',help='item factors filepath')
    parser.add_option('--train',dest='train',help='filepath to training data, just used to apply naming convention to output model saved here')
    parser.add_option('--outdir',dest='outdir',help='directory for output')
    parser.add_option('--description',dest='description',help='optional description of how factors were computed, will be saved with model so it can be output with evaluation results')
    (opts,args) = parser.parse_args()
    if not opts.factor_format or not opts.user_factors or not opts.item_factors \
            or not opts.outdir:
        parser.print_help()
        raise SystemExit
    model = MatrixFactorizationRecommender()
    logging.info('loading factors...')
    if opts.factor_format == 'npy':
        model.U = np.load(opts.user_factors)
        model.V = np.load(opts.item_factors)
    elif opts.factor_format == 'mm':
        model.U = mmread(opts.user_factors)
        model.V = mmread(opts.item_factors)
    elif opts.factor_format == 'tsv':
        model.U = np.loadtxt(opts.user_factors)
        model.V = np.loadtxt(opts.item_factors)
    else:
        raise ValueError('unknown factor format: {0}'.format(opts.factor_format))
    if opts.description:
        model.description = opts.description
    logging.info('saving model...')
    logging.info('creating output directory {0}...'.format(opts.outdir))
    subprocess.check_call(['mkdir','-p',opts.outdir])
    modelfile = get_modelfile(opts.train,opts.outdir)
    save_recommender(model,modelfile)
    logging.info('done')
Developer: KobeDeShow, Project: mrec, Lines: 57, Source: factors.py
Example 9: fit_lightfm_model
def fit_lightfm_model():
    """ Fit the lightFM model
    returns d_user_pred, list_user, list_coupon
    list_coupon = list of test coupons
    list_user = list of user ID
    d_user_pred : key = user, value = predicted ranking of coupons in list_coupon
    """
    # Load data
    Mui_train = spi.mmread("../Data/Data_translated/biclass_user_item_train_mtrx.mtx")
    uf = spi.mmread("../Data/Data_translated/user_feat_mtrx.mtx")
    itrf = spi.mmread("../Data/Data_translated/train_item_feat_mtrx.mtx")
    itef = spi.mmread("../Data/Data_translated/test_item_feat_mtrx.mtx")
    # Print shapes as a check
    print "user_features shape: %s,\nitem train features shape: %s,\nitem test features shape: %s" % (uf.shape, itrf.shape, itef.shape)
    # Load test coupon and user lists
    cplte = pd.read_csv("../Data/Data_translated/coupon_list_test_translated.csv")
    ulist = pd.read_csv("../Data/Data_translated/user_list_translated.csv")
    list_coupon = cplte["COUPON_ID_hash"].values
    list_user = ulist["USER_ID_hash"].values
    # Build model
    no_comp, lr, ep = 10, 0.01, 5
    model = LightFM(no_components=no_comp, learning_rate=lr, loss='warp')
    model.fit_partial(Mui_train, user_features = uf, item_features = itrf, epochs = ep, num_threads = 4, verbose = True)
    test = sps.csr_matrix((len(list_user), len(list_coupon)), dtype = np.int32)
    no_users, no_items = test.shape
    pid_array = np.arange(no_items, dtype=np.int32)
    # Create and initialise dict to store predictions
    d_user_pred = {}
    for user in list_user:
        d_user_pred[user] = []
    # Loop over users and compute predictions
    for user_id, row in enumerate(test):
        sys.stdout.write("\rProcessing user " + str(user_id) + "/ " + str(len(list_user)))
        sys.stdout.flush()
        uid_array = np.empty(no_items, dtype=np.int32)
        uid_array.fill(user_id)
        predictions = model.predict(uid_array, pid_array, user_features = uf, item_features = itef, num_threads=4)
        user = str(list_user[user_id])
        # apply MinMaxScaler for blending later on
        MMS = MinMaxScaler()
        pred = MMS.fit_transform(np.ravel(predictions))
        d_user_pred[user] = pred
    # Pickle the predictions for future use
    d_pred = {"list_coupon" : list_coupon.tolist(), "d_user_pred" : d_user_pred}
    with open("../Data/Data_translated/d_pred_lightfm.pickle", "w") as f:
        pickle.dump(d_pred, f, protocol = pickle.HIGHEST_PROTOCOL)
    return d_user_pred, list_user, list_coupon
Developer: VinACE, Project: Kaggle, Lines: 57, Source: ponpare_lightfm.py
Example 10: goMusic
def goMusic(K=80,steps=200,resume=False,normalize=True,R=None,V=None,mean_center=False,beta=0.0,betaO=0.0,normalizer=normalizer,doBias=True,every=1,doFactors=True,biasSteps=10):
    #R = mmread("reviews_Musical_Instruments.mtx").tocsr()
    if R is None:
        R = mmread("training.mtx").tocsr().toarray()
    else:
        R = R.toarray()
    if V is None:
        V = mmread("validation.mtx").todok()
    mu = np.finfo(float).eps
    if normalize:
        R = normalizer(R,1,0)
        print "normalizing, min/max", R.min(), R.max()
    #R = R[0:424,:]
    if not resume:
        P = normalizer(np.random.rand(R.shape[0],K),.1,0)
        Q = normalizer(np.asfortranarray(np.random.rand(K,R.shape[1])),.1,0)
        #bP,bQ = makeAvgBaseline(R)
        #print bP,bQ
        bP = None # np.zeros(R.shape[0])
        bQ = None # np.zeros(R.shape[1]) # (R > 0).mean(axis=0)
        #bP,bQ = makeAvgBaseline(R)
    else:
        P = np.loadtxt("P.txt")
        Q = np.loadtxt("Q.txt")
        bP = np.loadtxt("bP.txt")
        bQ = np.loadtxt("bQ.txt")
    print R.shape, P.shape, Q.shape
    print "starting doFactO"
    #chunkFactO(R,P,Q,K,steps=steps,chunks=1,discard=0) # chunks=800,discard=0
    #R,P,Q,bP,bQ = factO(R,P,Q,K,steps=steps,discard=0,bP=bP,bQ=bQ,beta=beta,betaO=betaO)
    rmses,maes,errs = [],[],[]
    def validation(P,Q,bP,bQ):
        rmse,mae,err = validate(T=R,V=V,P=P,Q=Q,bP=bP,bQ=bQ)
        rmses.append(rmse)
        maes.append(mae)
        errs.append(err)
    R,P,Q,bP,bQ,t_rmses = sigFactO(R,P,Q,K,bP=bP,bQ=bQ,steps=steps,discard=0.0,beta=beta,betaO=betaO,mean_center=mean_center,doBias=doBias,validate=validation,every=every,doFactors=doFactors,biasSteps=biasSteps)
    if normalize:
        R = renormalizer(R,1,0,5,0)
    dumparrays(R,P,Q,bP,bQ)
    return t_rmses,rmses,maes,errs
Developer: jsvidt, Project: -Nearest-Neighbour-Collaborative-Recommender-with-Threshold-Filtering, Lines: 57, Source: fact.py
Example 11: main
def main():
    train_tfidf = sio.mmread(tfidf_train_file)
    test_tfidf = sio.mmread(tfidf_test_file)
    svd = TruncatedSVD(400)
    svd_X_train = svd.fit_transform(train_tfidf)
    svd_X_test = svd.transform(test_tfidf)
    sio.mmwrite('train_tfidf_2013_svd_400_mtx', svd_X_train)
    sio.mmwrite('test_tfidf_svd_400_mtx', svd_X_test)
Developer: chyikwei, Project: kdd2014, Lines: 10, Source: tfidf_to_svd.py
Example 12: create_tox21
def create_tox21(sparsity_cutoff, validation_fold, dtype=np.float32, download_directory=_DATA_DIRECTORY):
    urlbase = "http://www.bioinf.jku.at/research/deeptox/"
    dst = os.path.join(download_directory, "raw")
    fn_x_tr_d = _download_file(urlbase, "tox21_dense_train.csv.gz", dst)
    fn_x_tr_s = _download_file(urlbase, "tox21_sparse_train.mtx.gz", dst)
    fn_y_tr = _download_file(urlbase, "tox21_labels_train.csv", dst)
    fn_x_te_d = _download_file(urlbase, "tox21_dense_test.csv.gz", dst)
    fn_x_te_s = _download_file(urlbase, "tox21_sparse_test.mtx.gz", dst)
    fn_y_te = _download_file(urlbase, "tox21_labels_test.csv", dst)
    cpd = _download_file(urlbase, "tox21_compoundData.csv", dst)
    y_tr = pd.read_csv(fn_y_tr, index_col=0)
    y_te = pd.read_csv(fn_y_te, index_col=0)
    x_tr_dense = pd.read_csv(fn_x_tr_d, index_col=0).values
    x_te_dense = pd.read_csv(fn_x_te_d, index_col=0).values
    x_tr_sparse = io.mmread(fn_x_tr_s).tocsc()
    x_te_sparse = io.mmread(fn_x_te_s).tocsc()
    # filter out very sparse features
    sparse_col_idx = ((x_tr_sparse > 0).mean(0) >= sparsity_cutoff).A.ravel()
    x_tr_sparse = x_tr_sparse[:, sparse_col_idx].A
    x_te_sparse = x_te_sparse[:, sparse_col_idx].A
    dense_col_idx = np.where(x_tr_dense.var(0) > 1e-6)[0]
    x_tr_dense = x_tr_dense[:, dense_col_idx]
    x_te_dense = x_te_dense[:, dense_col_idx]
    # The validation set consists of those samples with
    # the given cross-validation fold number
    info = pd.read_csv(cpd, index_col=0)
    f = info.CVfold[info.set != "test"].values
    idx_va = f == float(validation_fold)
    # normalize features
    from sklearn.preprocessing import StandardScaler
    s = StandardScaler()
    s.fit(x_tr_dense[~idx_va])
    x_tr_dense = s.transform(x_tr_dense)
    x_te_dense = s.transform(x_te_dense)
    x_tr_sparse = np.tanh(x_tr_sparse)
    x_te_sparse = np.tanh(x_te_sparse)
    x_tr = np.hstack([x_tr_dense, x_tr_sparse])
    x_te = np.hstack([x_te_dense, x_te_sparse])
    return (
        x_tr[~idx_va].astype(dtype, order="C"),
        y_tr[~idx_va].values.astype(dtype, order="C"),
        x_tr[idx_va].astype(dtype, order="C"),
        y_tr[idx_va].values.astype(dtype, order="C"),
        x_te.astype(dtype, order="C"),
        y_te.values.astype(dtype, order="C"),
    )
Developer: thejonan, Project: binet, Lines: 55, Source: datasets.py
Example 13: main
def main():
    FORMAT = '%(asctime)s %(levelname)s %(message)s'
    logging.basicConfig(format=FORMAT)
    logging.getLogger().setLevel(logging.INFO)
    args = parse_args()
    lang_map = {i: fn for i, fn in enumerate(sorted(listdir(args.lang_map)))}
    if args.train.endswith('.mtx'):
        mtx = mmread(args.train).todense()
        t_mtx = mmread(args.test).todense()
    else:
        with open(args.train) as stream:
            mtx = np.loadtxt(stream, np.float64)
        with open(args.test) as stream:
            t_mtx = np.loadtxt(stream, np.float64)
    labels = np.ravel(mtx[:, 0])
    test_labels = t_mtx[:, 0]
    test_mtx = t_mtx[:, 1:]
    if args.scale:
        train = scale(mtx[:, 1:], with_mean=False)
    else:
        train = mtx[:, 1:]
    kwargs = {}
    for a in args.params:
        k, v = a.split('=')
        try:
            v = int(v)
        except ValueError:
            pass
        kwargs[k] = v
    r = Representation(args.encoder, args.classifier, **kwargs)
    r.encode(train)
    logging.info('Matrix encoded')
    r.train_classifier(labels)
    logging.info('Model trained')
    acc = 0
    N = 0
    for vec_ in test_mtx:
        vec = np.ravel(vec_)
        cl = r.classify_vector(vec, with_probs=args.with_probs)
        try:
            lab = test_labels[N, 0]
        except IndexError:
            lab = test_labels[N]
        N += 1
        if args.with_probs:
            guess = max(enumerate(cl[0, :]), key=lambda x: x[1])[0]
            print('{0}\t{1}\t{2}'.format('\t'.join(map(str, cl[0, :])), lang_map[guess], lang_map[int(lab)]))
        else:
            try:
                guess = int(cl[0, 0])
            except IndexError:
                guess = int(cl + 0.5)
            print('{0}\t{1}'.format(lang_map[guess], lang_map[int(lab)]))
        if int(guess) == int(lab):
            acc += 1
Developer: juditacs, Project: dsl, Lines: 55, Source: run_experiment.py
Example 14: create_bars
def create_bars (h1, E, frict, damp, formulation):
    # compute all eigenvalues and eigenvectors
    if formulation == 'RO':
        pt0 = 'out/16-bars/MK_%g_%g_%g_%g'%(h1, E, frict, damp)
        sl0 = SOLFEC ('DYNAMIC', 1E-3, pt0)
        bl0 = BULK_MATERIAL (sl0, model = 'KIRCHHOFF', young = E, poisson = PoissonRatio, density = MassDensity)
        bod = BODY (sl0, 'FINITE_ELEMENT', COPY (mesh), bl0)
        eval = [] # selected eigenvalue list
        evec = [] # selected eigenvector list (BODY command takes a tuple (eval, evec) argument for the RO formulation)
        vsel = range (0, 32)
        if 0:
            BODY_MM_EXPORT (bod, pt0+'/M.mtx', pt0+'/K.mtx')
            M = mmread (pt0+'/M.mtx').todense()
            K = mmread (pt0+'/K.mtx').todense()
            for j in range (0, K.shape[1]):
                for i in range (j+1, K.shape[0]):
                    K [j, i] = K [i, j] # above diagonal = below diagonal
            x, y = eigh (K, M) # this produces y.T M y = 1 and y.T K y = x
            for j in vsel:
                eval.append (x[j].real)
                for z in y[:,j]:
                    evec.append (z.real)
        else:
            data0 = MODAL_ANALYSIS (bod, 45, pt0 + '/modal.data', verbose = 'ON', abstol = 1E-14)
            ndofs = mesh.nnod * 3
            for j in vsel:
                eval.append (data0[0][j])
                for k in range (j*ndofs,(j+1)*ndofs):
                    evec.append (data0[1][k])
        data = (eval, evec)
    # 16 bars domain
    sl2 = SOLFEC ('DYNAMIC', h1, 'out/16-bars/%s_%g_%g_%g_%g'%(formulation, h1, E, frict, damp))
    SURFACE_MATERIAL (sl2, model = 'SIGNORINI_COULOMB', friction = frict, restitution = 0.0)
    bl2 = BULK_MATERIAL (sl2, model = 'KIRCHHOFF', young = E, poisson = PoissonRatio, density = MassDensity)
    GRAVITY (sl2, (0, 0, -9.8))
    for i in range (0, nw):
        for j in range (0, nw):
            shp = COPY (mesh)
            TRANSLATE (shp, ((1-nw)*0.05+0.1*i, (1-nw)*0.05+0.1*j, 0))
            if formulation == 'RO':
                bd2 = BODY (sl2, 'FINITE_ELEMENT', shp, bl2, form = formulation, modal = data)
                bd2.scheme = 'DEF_LIM'
                bd2.damping = damp
            elif formulation == 'BC':
                bd2 = BODY (sl2, 'FINITE_ELEMENT', shp, bl2, form = formulation)
                bd2.scheme = 'DEF_LIM'
                bd2.damping = damp
            else:
                bd2 = BODY (sl2, 'RIGID', shp, bl2)
    BODY (sl2, 'OBSTACLE', COPY (obsm), bl2)
    return sl2
Developer: KonstantinosKr, Project: solfec, Lines: 54, Source: 16-bars.py
Example 15: validate
def validate(trunc=False, T=None, V=None, doRound=False, activation=sigmoid, P=None, Q=None, bP=None, bQ=None):
    if T is None:
        Rtraining = mmread('training.mtx').tocsr()
    else:
        Rtraining = T
    if V is None:
        R = mmread('validation.mtx').todok()
    else:
        R = V.todok()
    mean = (Rtraining.sum()) / (Rtraining > 0).sum()
    if P is None and Q is None and bP is None and bQ is None:
        P,Q,bP,bQ = np.loadtxt("P.txt"),np.loadtxt("Q.txt"),np.loadtxt("bP.txt"),np.loadtxt("bQ.txt")
    print R.shape, P.shape, Q.shape
    i = 0
    sum = 0
    sumAbs = 0
    lte1 = 0
    sumlte1 = 0
    errors = []
    for k,v in R.items():
        g = bP[k[0]] + bQ[k[1]] + np.dot(P[k[0],:],Q[:,k[1]])
        #if trunc:
        #    g = min(1,max(5,g))
        #for i in xrange(P.shape[1]):
        #    g += (P[k[0],i]) * (Q[i,k[1]])
        #
        #    if trunc:
        #        g = max(1,min(g,5))
        g = activation(mean + g)
        g = renormalizefloat(g,1,0,5,0)
        if doRound:
            g = round(g)
        e = (v - g)**2
        sumAbs += math.sqrt((v - g)**2)
        errors.append(e)
        if e < 1.00001:
            lte1 += 1
            sumlte1 += e
        sum += e
        #if e > 5:
        #    print i,v,g,e
        i += 1
    rmse = math.sqrt(sum/R.nnz)
    mae = sumAbs / R.nnz
    print "rmse", rmse
    print "mae", mae
    print "lte1", lte1, len(R.items()), lte1/float(len(R.items()))
    print "lte1 rmse", math.sqrt((sumlte1 + 1) / (lte1 + 1))
    print "validation mean", mean
    return rmse, mae, np.array(errors)
Developer: jsvidt, Project: -Nearest-Neighbour-Collaborative-Recommender-with-Threshold-Filtering, Lines: 54, Source: fact.py
Example 16: __init__
def __init__(self, train_file, test_file):
    """
    Read datasets from the specified files.
    """
    train = mmread(train_file)
    test = mmread(test_file)
    train = train.tocsc()
    test = test.tocsc()
    self.trainXList = [train]
    self.testXList = [test]
Developer: charanpald, Project: wallhack, Lines: 12, Source: StaticDataset.py
Example 17: load_or_create_matrices
def load_or_create_matrices():
    try:
        csr_sparse_ing = spio.mmread("csr_sparse_ing.mtx")
    except IOError:
        csr_sparse_ing = create_csr_sparse_ing()
    csr_filtered_ing = []
    for i in np.arange(1, 11):
        try:
            csr_filtered_ing.append(spio.mmread("csr_filtered_ing" + str(i) + ".mtx"))
        except IOError:
            csr_filtered_ing.append(create_filtered_csr_ing(csr_sparse_ing, i))
    return csr_sparse_ing, csr_filtered_ing
Developer: djddenis, Project: Whats-Cooking, Lines: 13, Source: whats-cooking.py
Example 18: load_sparse_matrix
def load_sparse_matrix(input_format, filepath):
    """
    Load a scipy.sparse.csr_matrix from an input file of the specified format.

    Parameters
    ----------
    input_format : str
        Specifies the file format:
        - tsv
        - csv
        - mm (MatrixMarket)
        - npz (scipy.sparse.csr_matrix serialized with mrec.sparse.savez())
        - fsm (mrec.sparse.fast_sparse_matrix)
    filepath : str
        The file to load.
    """
    if input_format == 'tsv':
        return loadtxt(filepath)
    elif input_format == 'csv':
        return loadtxt(filepath, delimiter=',')
    elif input_format == 'mm':
        return mmread(filepath).tocsr()
    elif input_format == 'npz':
        return loadz(filepath).tocsr()
    elif input_format == 'fsm':
        return fast_sparse_matrix.load(filepath).X
    raise ValueError('unknown input format: {0}'.format(input_format))
Developer: KobeDeShow, Project: mrec, Lines: 27, Source: __init__.py
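A hedged usage sketch for the loader above (the file name is a placeholder, and load_sparse_matrix is assumed to be imported from mrec together with the loadtxt, loadz and fast_sparse_matrix helpers it relies on):

# minimal usage sketch, assuming ratings_train.mtx exists on disk
train = load_sparse_matrix('mm', 'ratings_train.mtx')  # scipy.sparse.csr_matrix
print(train.shape, train.nnz)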
Example 19: main
def main(X_fname, Y_fname, result_fname=None):
    le = LabelEncoder()
    moves = pandas.read_csv(Y_fname, index_col=0)
    Y = moves.values.ravel()
    Y = le.fit_transform(Y)
    X = io.mmread(X_fname)
    print X.shape, Y.shape, len(le.classes_)
    X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.33)
    xg_train = xgboost.DMatrix(X_train, label=y_train)
    xg_test = xgboost.DMatrix(X_test, label=y_test)
    param = {}
    # use softmax multi-class classification
    param['objective'] = 'multi:softprob'
    param['eta'] = 0.002
    param['max_depth'] = 7
    param['nthread'] = 7
    param['num_class'] = len(le.classes_)
    param['eval_metric'] = 'merror'
    evals = [(xg_train, 'train'), (xg_test, 'eval')]
    # Train xgboost
    print "Training"
    t1 = time.time()
    bst = xgboost.train(param, xg_train, 500, evals, early_stopping_rounds=3)
    t2 = time.time()
    print t2 - t1
    if result_fname is None:
        result_fname = str(datetime.now())
    bst.save_model("%s.bst" % result_fname)
Developer: smurching, Project: pokemon_ai, Lines: 34, Source: model_xgb.py
Example 20: __init__
def __init__(self, interest_metric, dataset_dir, store_atmost):
    self.interest_metric = interest_metric
    self.dataset_dir = dataset_dir
    self.store_atmost = store_atmost
    self.u_r_t = mmread(join(dataset_dir, TIMED_INTERESTS_FN)).transpose()
    self.prediction_times = cPickle.load(open(join(dataset_dir, PREDICTION_TIMES_FN), "rb"))
    self.NU, self.NR = self.u_r_t.shape
Developer: fenekku, Project: Masters, Lines: 7, Source: interest_scores.py
Note: the scipy.io.mmread examples in this article were compiled by 纯净天空 from GitHub/MSDocs and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by various developers; copyright in each fragment remains with its original author, and distribution and use should follow the corresponding project's license. Please do not reproduce without permission.