
Python numpy.place function code examples


This article collects typical usage examples of the numpy.place function in Python. If you have been wondering what exactly numpy.place does, how to call it, or what real-world uses look like, the curated code examples below should help.



The following shows 20 code examples of the place function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
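Before the project examples, here is a minimal self-contained sketch (written for this article, not taken from any of the projects below) of the basic call np.place(arr, mask, vals): the function modifies arr in place, writing the entries of vals cyclically into every position where mask is True.

import numpy as np

# np.place mutates `arr` in place: wherever `mask` is True, it writes the
# entries of `vals`, repeating them cyclically if there are more True cells
# than values.
arr = np.arange(6).reshape(2, 3)     # [[0, 1, 2], [3, 4, 5]]
np.place(arr, arr > 2, [44, 55])     # 3 -> 44, 4 -> 55, 5 -> 44 (values cycle)
print(arr)                           # [[ 0  1  2], [44 55 44]]

# A recurring pattern in the examples below: recoding sentinel values,
# e.g. turning 0/1 labels into -1/+1 for scikit-learn style metrics.
labels = np.array([1, 0, 1, 0, 1])
np.place(labels, labels == 0, -1)
print(labels)                        # [ 1 -1  1 -1  1]

Note that np.place returns None; like the snippets below, you rely entirely on the side effect on the array you pass in.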

Example 1: score_MCC

def score_MCC(ground_truth, scores):
    '''
    assuming the model output is the probability of default, this probability
    can be used for ranking; the fraction of defaults in the validation data
    then determines the threshold applied to the predictions
    '''

    if isinstance(scores, pd.Series):
        scores = scores.values

    if isinstance(ground_truth, pd.Series):
        ground_truth = ground_truth.values

    tmp_ground_truth = np.copy(ground_truth)
    fault_frac = tmp_ground_truth.mean()
    # print('score shape:', scores.shape)
    print('mean of ground truth:', fault_frac)
    thres_value = np.percentile(scores, 100.*(1-fault_frac), axis=0)
    print('threshold for preds:', thres_value)
    binary_scores = scores > thres_value
    binary_scores = binary_scores.astype(int)
    ## convert to sk-learn format
    np.place(binary_scores, binary_scores==0, -1)
    np.place(tmp_ground_truth, tmp_ground_truth==0, -1)

    return matthews_corrcoef(tmp_ground_truth, binary_scores)
Author: mengyx-work | Project: xgboost_hyperopt | Lines: 26 | Source: validation_tools.py


Example 2: conditional_logsumexp

 def conditional_logsumexp(where, axis):
     masked = -np.ones(a.shape) * np.inf
     np.copyto(masked, a, where = where)
     masked_sum = logsumexp(masked, axis = axis)
     #np.copyto(masked_sum,  -np.ones(masked_sum.shape) * np.inf, where = np.isnan(masked_sum)) 
     np.place(masked_sum, np.isnan(masked_sum), -np.inf)
     return masked_sum
Author: ingmarschuster | Project: ModelSelection | Lines: 7 | Source: estimator_statistics.py


Example 3: make_mask_map_4d

def make_mask_map_4d(data, infile, outfile):
    """ Make mask map with 4d dimeions
    data: values for levels in infile. Shape = [4th dimension, regions]
    infile: input file to replace levels with values
    outfile: output file name
    """
    from neurosynth.base.mask import Masker
    from neurosynth.base import imageutils
    from nibabel import nifti1

    data = np.array(data)

    # Load image with masker
    masker = Masker(infile)
    img = imageutils.load_imgs(infile, masker)

    header = masker.get_header()

    shape = header.get_data_shape()[0:3] + (data.shape[0],)
    header.set_data_shape(shape)

    result = []

    for t_dim, t_val in enumerate(data):
        result.append(img.copy())
        for num, value in enumerate(t_val):
            np.place(result[t_dim], img == num + 1, [value])

    result = np.hstack(result)

    header.set_data_dtype(result.dtype)  # Avoids loss of precision
    img = nifti1.Nifti1Image(masker.unmask(result).squeeze(), None, header)
    img.to_filename(outfile)
Author: margulies | Project: NS_Classify | Lines: 33 | Source: tools.py


Example 4: __call__

 def __call__(self, x):
     x_ = np.asarray(x)
     if x_.size == 1:
         x_.shape = (1,)
     np.place(x_, x_ > 0, [1])
     np.place(x_, x_ < 0, [-1])
     return x_
Author: evenmarbles | Project: rlpy | Lines: 7 | Source: neuralnet.py


Example 5: remove_wrongly_sized_connected_components

 def remove_wrongly_sized_connected_components(self, a, min_size, max_size, in_place):
     """
     Adapted from http://github.com/jni/ray/blob/develop/ray/morpho.py
     (MIT License)
     """
     bin_out = self.BinaryOut.value
     
     original_dtype = a.dtype
         
     if not in_place:
         a = a.copy()
     if min_size == 0 and (max_size is None or max_size > numpy.prod(a.shape)): # shortcut for efficiency
         return a
     
     try:
         component_sizes = numpy.bincount( a.ravel() )
     except TypeError:
         # On 32-bit systems, must explicitly convert from uint32 to int
         # (This fix is just for VM testing.)
         component_sizes = numpy.bincount( numpy.asarray(a.ravel(), dtype=int) )
     bad_sizes = component_sizes < min_size
     if max_size is not None:
         numpy.logical_or( bad_sizes, component_sizes > max_size, out=bad_sizes )
     
     bad_locations = bad_sizes[a]
     a[bad_locations] = 0
     if (bin_out):
         # Replace non-zero values with 1
         numpy.place(a,a,1)
     return numpy.array(a, dtype=original_dtype)
Author: JensNRAD | Project: lazyflow | Lines: 30 | Source: opFilterLabels.py


Example 6: substitute_values

    def substitute_values(self, vect):
        """
        Internal method to substitute integers into the vector, and construct
        metadata to convert back to the original vector.

        np.nan is always given -1; all other objects are given integers in
        order of appearance.

        Parameters
        ----------
        vect : np.array
            the vector in which to substitute values
        """

        try:
            unique = np.unique(vect)
        except:
            unique = set(vect)

        unique = [
            x for x in unique if not isinstance(x, float) or not isnan(x)
        ]

        arr = np.copy(vect)
        for new_id, value in enumerate(unique):
            np.place(arr, arr==value, new_id)
            self.metadata[new_id] = value
        arr = arr.astype(float)  # np.float was removed in NumPy 1.24; use the builtin float
        np.place(arr, np.isnan(arr), -1)
        self.arr = arr

        if -1 in arr:
            self.metadata[-1] = self._missing_id
Author: Rambatino | Project: CHAID | Lines: 33 | Source: column.py


Example 7: _getdatafromsql

def _getdatafromsql(connection, tmp_table, query):
    """
    Private function creating a ndarray from the current table.

    Parameters
    ----------
    connection: sqlite3.Connection
        Current SQL connection.
    tmp_table: string
        Name of the temporary table created for the purpose of keeping ids when WHERE is used
    query: string
        SQL query.
    """
    # Transforms the typestr into dtypes
    
    # Define and execute the query
    connection.execute("CREATE TEMPORARY TABLE %s AS %s"%(tmp_table, query))
    
    # Get the list of names and types from the pragma
    pragmastr = "PRAGMA TABLE_INFO(%s)"%tmp_table
    (names, typestr) = zip(*(_[1:3] for _ in connection.execute(pragmastr).fetchall()))
    ndtype = []
    for (i, (n, t)) in enumerate(zip(names, typestr)):
        
        # Transform the name into a regular string (not unicode)
        n = str(n)
        if t =='INTEGER':
            ndtype.append((n, int))
        elif t =='TEXT':
            ndtype.append((n, '|S30'))
        elif t == 'BLOB':
            ndtype.append((n, object))
        else:
            ndtype.append((n, float))
    
    # Construct the ndarray
    connection.row_factory = sqlite3.Row
    data = connection.execute("SELECT * FROM %s"%tmp_table).fetchall()
    try:
        return np.array(data, dtype=ndtype)
    except TypeError:
        output = ma.empty(len(data), dtype=ndtype)
        
        # Find the index of the first row (0 or 1)?
        rowidref = connection.execute("SELECT rowid FROM %s LIMIT 1"%tmp_table).fetchone()[0]
        
        # Loop through the different fields identifying the null fields to mask
        maskstr_template = "SELECT rowid FROM %s WHERE %%s IS NULL"%tmp_table
        datastr_template = "SELECT %%s FROM %s WHERE %%s IS NOT NULL"%tmp_table
        for (i, field) in enumerate(names):
            current_output = output[field]
            current_mask = current_output._mask
            maskstr = maskstr_template % field
            maskidx = [_[0] - rowidref for _ in connection.execute(maskstr).fetchall()]
            current_mask[maskidx] = True
            datastr = datastr_template % (field, field)
            np.place(current_output._data, ~current_mask,
                [_[0] for _ in connection.execute(datastr).fetchall()])
        connection.execute("DROP TABLE %s"%tmp_table)
        return output
Author: calanoue | Project: GFIN_Data_Work | Lines: 60 | Source: sqlite_io.py


Example 8: pcprint

 def pcprint(self):
     if self.doBoth==1:
         subplot(1,2,2)
         fs=12
     else:
         fs=15
     self.c1=self.data1.copy()      #for copying histogram value
     data2=np.isfinite(self.data1)
     data3=self.data1[data2]
     nn=str(self.lineEdit_4.text())
     b=nn.split(',')
     if len(b)==1:
         m=int(b[0])
         n=int(b[0])
     else:
         m=int(b[0])
         n=int(b[1])
     dmax=nanmax(data3)
     dmin=nanmin(data3)
     total=dmax-dmin
     low=dmin+(total*m)/100
     high=dmax-(total*n)/100
     datal = np.where(data3 < low ,low,data3)
     datah = np.where(datal > high ,high,datal)
     if self.checkBox.isChecked():   # for fix Legend
         self.c1[0][0]=self.maxdata
         self.c1[0][1]=self.mindata
     np.place(self.c1,data2,datah)
     plt.imshow(self.c1,cmap=get_cmap(self.colormap),extent=[self.originX,self.lastX,self.lastY,self.originY])
     plt.text(79.2,31,self.image,fontsize=fs)
     plt.title('Percentile Clipped')
     plt.colorbar()
Author: adityacp | Project: Time_Series_Satellite_Data_Visualizer | Lines: 32 | Source: TSSDV.py


Example 9: remove_wrongly_sized_connected_components

def remove_wrongly_sized_connected_components(a, min_size,
        max_size=None,
        in_place=False, bin_out=False):
    """
    Copied from lazyflow.operators.opFilterLabels.py
    Originally adapted from http://github.com/jni/ray/blob/develop/ray/morpho.py
    (MIT License)
    """
    original_dtype = a.dtype

    if not in_place:
        a = a.copy()
    if min_size == 0 and (max_size is None or max_size > np.prod(a.shape)): # shortcut for efficiency
        if (bin_out):
            np.place(a,a,1)
        return a

    try:
        component_sizes = np.bincount( a.ravel() )
    except TypeError:
        # On 32-bit systems, must explicitly convert from uint32 to int
        # (This fix is just for VM testing.)
        component_sizes = np.bincount( np.asarray(a.ravel(), dtype=int) )
    bad_sizes = component_sizes < min_size
    if max_size is not None:
        np.logical_or( bad_sizes, component_sizes > max_size, out=bad_sizes )

    bad_locations = bad_sizes[a]
    a[bad_locations] = 0
    if (bin_out):
        # Replace non-zero values with 1
        np.place(a,a,1)
    return np.array(a, dtype=original_dtype)
Author: constantinpape | Project: DenseReconstruction | Lines: 33 | Source: dense_reconstruction.py


Example 10: matrixDiscreteMaker

def matrixDiscreteMaker(t):

    '''
    this function converts the frequency matrix t into an integer-valued matrix
    rules used:

    less than 1% or nan --> nan
    between 1% and 5% --> 1
    between 5% and 10% --> 2
    between 10% and 20% --> 3
    more than 20% --> 4
    '''

    # nan, convert to 0 to avoid numerical warnings
    numpy.place(t,numpy.isnan(t),0)

    # between 1% and 5% --> 1
    t[numpy.where(numpy.logical_and(t>=0.01, t<0.05))]=1

    # between 5% and 10% --> 2
    t[numpy.where(numpy.logical_and(t>=0.05, t<0.1))]=2

    # between 10% and 20% --> 3
    t[numpy.where(numpy.logical_and(t>=0.1, t<0.2))]=3

    # more than 20% --> 4
    t[numpy.where(numpy.logical_and(t>=0.2, t<1-1e-10))]=4

    # less than 1% or nan --> nan, which maps as white. this line should be last in order to avoid numerical warnings
    numpy.place(t,t<0.01,float('nan'))

    return t
Author: adelomana | Project: erebus | Lines: 32 | Source: abundancesGrapher.py


Example 11: sdprint

    def sdprint(self):
        if self.doBoth==1:
            subplot(1,2,2)
            fs=12
        else:
            fs=15
        self.c1=self.data1.copy()      #for copying histogram value
        data2=np.isfinite(self.data1)
        data3=self.data1[data2]
        mean=np.mean(data3)
        sd=np.std(data3)
        nn=str(self.lineEdit_4.text())  
        b=nn.split(',')
        if len(b)==1:
            m=int(b[0])
            n=int(b[0])
        else:
            m=int(b[0])
            n=int(b[1])
        low=mean-m*sd
        high=mean+n*sd
##        print sd,mean,low,high
        datal = np.where(data3 < low ,low,data3)
        datah = np.where(datal > high ,high,datal)
        np.place(self.c1,data2,datah)
        if self.checkBox.isChecked():       # for Fix-Legend
            self.c1[0][0]=self.maxdata
            self.c1[0][1]=self.mindata
        plt.imshow(self.c1,cmap=get_cmap(self.colormap),extent=[self.originX,self.lastX,self.lastY,self.originY])
        plt.text(79.2,31,self.image,fontsize=fs)
        plt.title('Standard Deviation')
        plt.colorbar()
Author: adityacp | Project: Time_Series_Satellite_Data_Visualizer | Lines: 32 | Source: TSSDV.py


Example 12: IDCTnDequantize

	def IDCTnDequantize(prop):
		#To dequantize
		img3 = prop.image
		iHeight, iWidth = img3.shape[:2]
		img2 = np.zeros((iHeight,iWidth,3), np.uint8)
		#print img2.dtype

		for startY in range(0, iHeight, 8):
			for startX in range(0, iWidth, 8):
				for c in range(0, 3):
					block = img3[startY:startY+8, startX:startX+8, c:c+1].reshape(8,8)
			   
					blockf = np.float32(block)	 # float conversion
					dst = cv2.idct(blockf)		 # inverse dct
					np.place(dst, dst>255.0, 255.0)	 # saturation
					np.place(dst, dst<0.0, 0.0)		 # grounding 
					block = np.uint8(np.around(dst)) 

					# store the results
					for y in range(8):
						for x in range(8):
							img2[startY+y, startX+x, c] = block[y, x]
		
		# convert to BGR
		img2 = cv2.cvtColor(img2, cv2.COLOR_YCR_CB2BGR)

		prop.image = img2
Author: riddhishb | Project: ImageProcessing_Tools | Lines: 27 | Source: JSteg.py


Example 13: micro_step

    def micro_step(self):
        """ Defining microphysical step """
        print "qc min, max przed mikro", self.state["rc"].min(), self.state["rc"].max()
        print "nc min, max przed mikro", self.state["nc"].min(), self.state["nc"].max()
        # dot_ variables have to be zero before rhs_cellwise
        for k in ("dot_th_d", "dot_rv", "dot_rc", "dot_nc", "dot_rr", "dot_nr"):
            self.state_dot[k] *= 0.
        #pdb.set_trace()
        libcl.blk_2m.rhs_cellwise(self.opts,
                                  self.state_dot["dot_th_d"], self.state_dot["dot_rv"],
                                  self.state_dot["dot_rc"], self.state_dot["dot_nc"],
                                  self.state_dot["dot_rr"], self.state_dot["dot_nr"],
                                  self.state["rho_d"], self.state["th_d"],
                                  self.state["rv"], self.state["rc"], self.state["nc"],
                                  self.state["rr"], self.state["nr"], self.dt)
        print "qc min, max po mikro", self.state["rc"].min(), self.state["rc"].max()
        print "nc min, max po mikro", self.state["nc"].min(), self.state["nc"].max()
                        
        
        for k in ("th_d", "rv", "rc", "nc", "rr", "nr"):
            self.state[k] += self.state_dot["dot_"+k] * self.dt

        # rc, nc can be sometimes smaller than zero -- TODO!!
        np.place(self.state["rc"], self.state["rc"]<0, 0)
        np.place(self.state["nc"], self.state["nc"]<0, 0)
        print "qc min, max po place", self.state["rc"].min(), self.state["rc"].max()
        print "nc min, max po place", self.state["nc"].min(), self.state["nc"].max()
Author: djarecka | Project: cloudtest | Lines: 27 | Source: oop_2mom_adv_hor.py


Example 14: generate

def generate(input):
    """ Testing function, all parameters are in a single tuple """
    seed,gen,p0,p1,n,method,MCsize= input
    npran.seed(seed)
    if gen.lower() == "ising":
        X    = ising_X(p1+p0,n)
        ynor = norm_y(X,p1)
    elif gen.lower() == "genetic":
        genes = np.genfromtxt('data/SNPdata.txt', delimiter=',')
        np.place(genes,genes!=0,1)
        X = given_X(p1+p0,n,genes)
    ybin = bern_y(X,p1)
    ynor = norm_y(X,p1)

    # Logit
    bin_logit = ko.knockoff_logit(ybin,X,.2,
                              knockoff='binary',
                              method=method,
                              MCsize=MCsize,
                              intercept=True
                              )
    bin_logit.fit()
    ori_logit = ko.knockoff_logit(ybin,X,.2,
                              knockoff='original',
                              intercept=False
                              )
    ori_logit.fit()
    trueS = (np.arange(p0+p1)<p1).astype(int)
    bin_FDR   = np.dot(bin_logit.S,1-trueS)/max(np.sum(bin_logit.S),1)
    bin_power = np.dot(bin_logit.S,trueS)  /max(p1,1)
    ori_FDR   = np.dot(ori_logit.S,1-trueS)/max(np.sum(ori_logit.S),1)
    ori_power = np.dot(ori_logit.S,trueS)  /max(p1,1)
    corr      = np.corrcoef(ori_logit.S,bin_logit.S)[0,1]
    ko_corr = [cor for cor in bin_logit.emp_ko_corr if not np.isnan(cor)]

    with open('data/logit_test_'+str(p0+p1)+'_w_n.txt','a') as f:
        f.write("%d\t%s\t%d\t%d\t%.5f\t%.5f\t%.5f\t%.5f\t%.5f\t%.5f\t%.5f\t%.5f\n" % (seed, gen, p1, n, bin_logit.M_distortion, np.mean(ko_corr), bin_FDR, bin_power, np.mean(ori_logit.emp_ko_corr), ori_FDR, ori_power, corr))

    # LASSO
    bin_lasso = ko.knockoff_lasso(ynor,X,.2,
                              knockoff='binary',
                              method=method,
                              MCsize=MCsize,
                              intercept=True
                              )
    bin_lasso.fit(bin_logit.X_lrg)
    ori_lasso = ko.knockoff_lasso(ynor,X,.2,
                              knockoff='original',
                              intercept=False
                              )
    ori_lasso.fit()
    trueS = (np.arange(p0+p1)<p1).astype(int)
    bin_FDR   = np.dot(bin_lasso.S,1-trueS)/max(np.sum(bin_lasso.S),1)
    bin_power = np.dot(bin_lasso.S,trueS)  /max(p1,1)
    ori_FDR   = np.dot(ori_lasso.S,1-trueS)/max(np.sum(ori_lasso.S),1)
    ori_power = np.dot(ori_lasso.S,trueS)  /max(p1,1)
    corr      = np.corrcoef(ori_lasso.S,bin_lasso.S)[0,1]

    with open('data/lasso_test_'+str(p0+p1)+'_w_n.txt','a') as f:
        f.write("%d\t%s\t%d\t%d\t%.5f\t%.5f\t%.5f\t%.5f\t%.5f\t%.5f\t%.5f\t%.5f\n" % (seed, gen, p1, n, bin_logit.M_distortion, np.mean(ko_corr), bin_FDR, bin_power, np.mean(ori_lasso.emp_ko_corr), ori_FDR, ori_power, corr))
Author: ajmaurer | Project: Chicago-Course-Work | Lines: 60 | Source: compare.py


Example 15: equalise_histogram

def equalise_histogram(image):
    histo = generate_histogram(image)
    new_image = np.zeros(image.shape)

    height, width = image.shape
    total_pixels = height * width

    freq_sum = np.sum(histo)

    for i, freq in enumerate(reversed(histo)):
        intensity = 255 - i
        new_intensity = round(255 * freq_sum / total_pixels)

        temp = image.copy()
        np.place(temp, temp != intensity, 0)
        np.place(temp, temp == intensity, new_intensity)

        new_image += temp

        freq_sum -= freq

    new_image = np.array(new_image, dtype=np.uint8)
    new_histo = generate_histogram(new_image)
    plot_histogram(new_histo)

    return new_image
Author: gautamabhishek46 | Project: IVP_Assignment_2 | Lines: 26 | Source: main.py


Example 16: logsumexp_array

def logsumexp_array(x, axis=None):
    """
    Compute log(sum(exp(x))) along a particular axis for Numpy arrays.  
    """
    # This implementation hasn't been tested on arrays with dimension > 2!
    if axis is None:
        max_ent = x.max()
        bias = max_ent
    else:
        max_ent = x.max(axis)
        bias = max_ent if axis == 0 else max_ent[:, numpy.newaxis]

    # In the no-axis case, if -Inf is the max value, it means *all* the entries
    # were -Inf, so we already know what's going to happen and we can skip doing
    # real work.
    if axis is None and max_ent == LOG_ZERO:
        return LOG_ZERO
        
    # Otherwise, there's a bit of trickiness here - subtracting -Inf results in
    # Nan, so we never want to use that as a bias.
    mask = (max_ent == LOG_ZERO)
    if mask.any():
        # If some rows or columns have only -Inf values, use a bias of 0 in just
        # those rows or cols.
        numpy.place(max_ent, mask, 0.0)
        numpy.place(bias, bias == LOG_ZERO, 0.0)
    return max_ent + quiet_log(numpy.sum(numpy.exp(x - bias), axis=axis))
Author: d-unknown-processor | Project: speechAD | Lines: 27 | Source: mathutils.py


Example 17: testDropNegatives

 def testDropNegatives(self):
   # Note: the test replaces segment_id 8 with -1 in the indices and zeroes
   # out the corresponding values generated by numpy.
   dtypes = [
       dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int64,
       dtypes_lib.int32, dtypes_lib.complex64, dtypes_lib.complex128
   ]
   indices_flat = np.array([0, 4, 0, 8, 3, 8, 4, 7, 7, 3])
   num_segments = 12
   for indices in indices_flat, indices_flat.reshape(5, 2):
     shape = indices.shape + (2,)
     for dtype in dtypes:
       with self.test_session(use_gpu=True):
         tf_x, np_x = self._input(shape, dtype=dtype)
         np_ans = self._segmentReduce(
             indices, np_x, np.add, op2=None, num_segments=num_segments)
         # Replace np_ans[8] with 0 for the value
         np_ans[8:] = 0
         # Replace 8 with -1 in indices
         np.place(indices, indices == 8, [-1])
         s = math_ops.unsorted_segment_sum(
             data=tf_x, segment_ids=indices, num_segments=num_segments)
         tf_ans = s.eval()
       self.assertAllClose(np_ans, tf_ans)
       self.assertShapeEqual(np_ans, s)
Author: AbhinavJain13 | Project: tensorflow | Lines: 25 | Source: segment_reduction_ops_test.py


Example 18: get_normalized_data

def get_normalized_data():
    print("Reading in and transforming data...")

    if not os.path.exists('../large_files/train.csv'):
        print('Looking for ../large_files/train.csv')
        print('You have not downloaded the data and/or not placed the files in the correct location.')
        print('Please get the data from: https://www.kaggle.com/c/digit-recognizer')
        print('Place train.csv in the folder large_files adjacent to the class folder')
        exit()

    df = pd.read_csv('../large_files/train.csv')
    data = df.values.astype(np.float32)
    np.random.shuffle(data)
    X = data[:, 1:]
    Y = data[:, 0]

    Xtrain = X[:-1000]
    Ytrain = Y[:-1000]
    Xtest  = X[-1000:]
    Ytest  = Y[-1000:]

    # normalize the data
    mu = Xtrain.mean(axis=0)
    std = Xtrain.std(axis=0)
    np.place(std, std == 0, 1)
    Xtrain = (Xtrain - mu) / std
    Xtest = (Xtest - mu) / std
    
    return Xtrain, Xtest, Ytrain, Ytest
Author: cmagnusb | Project: machine_learning_examples | Lines: 29 | Source: util.py


Example 19: solve_tangency

def solve_tangency(R, C, rf, fit_func=lambda mean, var, rf: mean-rf/(var**.5)):
    """Calculates the tangency portfolio given a set of expected returns (R), covariances (C),
risk-free rate (rf), and a fitness function (fit_func) which defaults to the Sharpe ratio.

Returns the weights of the tangency portfolio.
"""
    n = len(R)
    #Begin with equal weights
    W = np.ones([n])/n

    # Replace expected returns that are less than rf with rf + a super small value
    # since if it's less than rf, the sharpe ratio is negative, which ruins minimization
    np.place(R, R<rf, rf+0.00001)

    # Set boundaries on weights - no shorting or leverage allowed. Can probably incorporate
    # this functionality easily, though.
    bounds = [(0., 1.) for i in range(n)]  # range: xrange is Python 2 only

    # Set constraints as defined in SciPy's documentation for minimize. 'fun' forces the weights to sum to 1
    constraints = ({'type':'eq',
                    'fun': lambda W: sum(W)-1.})

    # Minimize fitness by changing W with (R, C, rf, fit_func) as given using the SLSQP method
    # based on the above defined constraints and bounds
    tangency = spo.minimize(fun=fitness,
                            x0=W,
                            args=(R, C, rf, fit_func),
                            method='SLSQP',
                            constraints=constraints,
                            bounds=bounds)
    if not tangency.success:
        raise BaseException(tangency.message)
    return tangency.x
Author: a-phillips | Project: pyfi | Lines: 33 | Source: mvo.py


Example 20: get_iris

def get_iris(catagory):
    
    data=np.loadtxt('iris.txt')
    np.place(data[:,0],data[:,0]!=catagory,-1)
    np.place(data[:,0],data[:,0]==catagory,1)
    
    return [np.concatenate((data[0:25],data[75:125])), np.concatenate((data[25:75],data[125:150]))]
Author: matthew-norton | Project: SVM-Kernel-Selection | Lines: 7 | Source: getdata.py



Note: The numpy.place examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors. Please consult the corresponding project's license before redistributing or reusing the code, and do not reproduce this article without permission.

