Python numpy.exp2 Function Code Examples


This article collects typical usage examples of Python's numpy.exp2 function. If you are wondering what exp2 does, how to call it, or what real-world uses look like, the curated examples below may help.



Twenty code examples of the exp2 function are shown below, sorted by popularity by default. You can upvote the examples you find useful to help surface better Python code samples.
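
Before diving into the examples, a quick refresher: numpy.exp2(x) computes 2**x element-wise and is the exact inverse of numpy.log2, which is why the two so often appear together when round-tripping log-scale data. A minimal sketch:

import numpy as np

x = np.array([-1.0, 0.0, 1.0, 3.0])
print(np.exp2(x))                          # [0.5 1.  2.  8. ] -- 2**x element-wise
print(np.allclose(np.exp2(x), 2.0 ** x))   # True
print(np.log2(np.exp2(x)))                 # recovers x; log2 is the inverse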

Example 1: __add__

 def __add__(self, other):
     """
     Addition in real space; an optimization of Manning & Schuetze,
     p. 337 (eq. 9.21)
     
     >>> a_real = .5
     >>> b_real = .25
     >>> a_bw = BitWeight(a_real)
     >>> b_bw = BitWeight(b_real)
     >>> BitWeight.close_enough((a_bw + b_bw).to_real, a_real + b_real)
     True
     >>> (BitWeight(.25) + BitWeight(.25)).to_real
     0.5
     """
     other_bw = other if hasattr(other, "bw") else BitWeight(other)
     if other_bw.bw - self.bw > self.BIG:
         to_return = self.bw
     elif self.bw - other_bw.bw > self.BIG:
         to_return = other_bw.bw
     else:
         if other_bw.bw > self.bw:
             to_return = other_bw.bw - log2(1.0 + exp2(other_bw.bw - self.bw))
         elif other_bw.bw < self.bw:
             to_return = self.bw - log2(exp2(self.bw - other_bw.bw) + 1.0)
         else:
             to_return = other_bw.bw - 1.0
             # not 1 + x_bw.bw as you might think, as BWs are
             # NEGATIVE log-weights
     return BitWeight(to_return, True)
Author: kylebgorman, Project: kylebgorman.github.io, Lines: 29, Source: bitweight.py
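
This snippet works entirely with negative log2 weights (bw = -log2(p)), so adding two probabilities becomes a base-2 "logsumexp". A small self-contained check of the identity it relies on (a sketch only, not the original BitWeight class):

import numpy as np

def add_neg_log2(a, b):
    # a = -log2(p), b = -log2(q); return -log2(p + q) without leaving log space
    if a == b:
        return a - 1.0      # p + p = 2p, so -log2(2p) = a - 1
    lo, hi = min(a, b), max(a, b)
    # algebraically equal to the snippet's hi - log2(1 + 2**(hi - lo)) form
    return lo - np.log2(1.0 + np.exp2(lo - hi))

p, q = 0.5, 0.25
print(np.isclose(add_neg_log2(-np.log2(p), -np.log2(q)), -np.log2(p + q)))  # True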


Example 2: summary_stats

def summary_stats(alist):
    # Compute summary stats from dropout activations returned by simulate.
    Elist = []
    NWGMlist = []
    Vlist = []
    
    #raise Exception('scipy not working!')
    #from scipy import stats as stats

    for l, a in enumerate(alist):
        E  = np.mean(a, axis=0)      # Arithmetic mean over dropout samples.
        #G  = stats.gmean(a, axis=0)  # Geometric mean.
        #G  = np.prod(a, axis=0) ** 1.0/a.shape[0]  # Geometric mean.
        G  = np.exp2(np.sum(np.log2(a), axis=0) * 1.0/a.shape[0])  # Geometric mean.
        #N  = stats.gmean(1.0-a, axis=0)
        #N  = np.prod(1.0-a, axis=0) ** 1.0/a.shape[0]
        N  = np.exp2(np.sum(np.log2(1.0-a), axis=0) * 1.0/a.shape[0])
        NWGM = G / (G + N)             # Normalized geometric mean.
        V = np.var(a, axis=0)

        # Change 1 x Units x Inputs matrix to Units x Inputs
        Elist.append(E)
        NWGMlist.append(NWGM)
        Vlist.append(V)
    return Elist, NWGMlist, Vlist
Author: sdmassey27, Project: pylearn2, Lines: 25, Source: dropoutsimulator.py
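
The exp2/log2 pair in summary_stats is just the geometric mean computed in log space, which avoids the underflow the commented-out np.prod variant would hit with many dropout samples (and sidesteps its precedence slip, since ** binds tighter than /). A quick equivalence check, assuming strictly positive activations:

import numpy as np

a = np.random.uniform(0.1, 1.0, size=(50, 8))          # e.g. 50 dropout samples x 8 units
gm_log = np.exp2(np.mean(np.log2(a), axis=0))          # log-space geometric mean, as in the snippet
gm_direct = np.prod(a, axis=0) ** (1.0 / a.shape[0])   # direct definition (note the parentheses)
print(np.allclose(gm_log, gm_direct))                  # True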


Example 3: train

 def train(self, feature_stream, alpha, beta, lamba1, lamba2):
     validate_helper = utility.ValidateHelper()
     self.z = np.zeros(self.feature_count)
     self.n = np.zeros(self.feature_count)
     self.w = np.zeros(self.feature_count)
     for count, (click, features) in enumerate(feature_stream):
         no_zero_index = []
         t = 0
         for feature_index in features:
             no_zero_index.append(feature_index)
             if np.abs(self.z[feature_index]) > lamba1:
                 _t = (-1.0 / ((beta + np.sqrt(self.n[feature_index])) / alpha + lamba2)) * (self.z[feature_index] - np.sign(self.z[feature_index]) * lamba1)
                 self.w[feature_index] = _t
                 t += _t
             else:
                 self.w[feature_index] = 0
         p = math.sigmoid(t)
         for feature_index in no_zero_index:
             g = p - click
             sigma = (1.0 / alpha) * (np.sqrt(self.n[feature_index] + np.exp2(g)) - np.sqrt(self.n[feature_index]))
             w_i = self.w[feature_index]
             self.z[feature_index] += g - sigma * w_i
             self.n[feature_index] += np.exp2(g)
         validate_helper.update(p, click, 0.5)
     validate_helper.out_put()
Author: breakhearts, Project: ctr, Lines: 25, Source: fctl.py
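
One thing worth double-checking in this snippet: np.exp2(g) evaluates 2**g, while the standard FTRL-Proximal update (McMahan et al.) accumulates the squared gradient, i.e. sigma = (sqrt(n + g**2) - sqrt(n)) / alpha and n += g**2. The two quantities differ substantially for typical logistic-loss gradients:

import numpy as np

g = 0.3                  # a typical per-example gradient p - click
print(np.exp2(g))        # 1.2311..., i.e. 2**g as computed in the snippet
print(g ** 2)            # 0.09, the squared gradient used by standard FTRL-Proximal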


Example 4: __add__

 def __add__(self, x):
     """
     Addition in real space; an optimization of Manning & Schuetze,
     p. 337 (eq. 9.21)
     
     >>> a_real = .5
     >>> b_real = .25
     >>> a_bw = BitWeight(a_real)
     >>> b_bw = BitWeight(b_real)
     >>> BitWeight.close_enough((a_bw + b_bw).to_real, a_real + b_real)
     True
     """
     x_bw = x if hasattr(x, 'bw') else BitWeight(x)
     if x_bw.bw - self.bw > self.BIG:
         to_return = self.bw
     elif self.bw - x_bw.bw > self.BIG:
         to_return = x_bw.bw
     else:
         if x_bw.bw > self.bw:
             to_return = x_bw.bw - log2(1. + exp2(x_bw.bw - self.bw))
         elif x_bw.bw < self.bw:
             to_return = self.bw - log2(exp2(self.bw - x_bw.bw) + 1.)
         else:
             to_return = x_bw.bw - 1.
             # not 1 + x_bw.bw as you might think, as BWs are
             # NEGATIVE log-weights
     return BitWeight(to_return, True)
Author: shiranD, Project: wordPromPred, Lines: 27, Source: bitweight.py


Example 5: calc_feature

def calc_feature(centroids, patch_width, stride, path, p, q):
	t = time()
	image = misc.imread(path)

	# Crop here
	crop_size = 300
	startX = (image.shape[0] - crop_size) / 2
	startY = (image.shape[0] - crop_size) / 2
	endX = startX + crop_size
	endY = startY + crop_size
	image = image[startX:endX, startY:endY, :]

	# Extract patches
	patches = patch_extract(image, patch_width, stride)
	patches = numpy.float32(patches)

	# Preprocessing
	# Normalize
	patches = patches - numpy.asmatrix(patches.mean(axis=1)).T
	patches = patches / patches.std(axis=1)
	patches = numpy.nan_to_num(patches)

	# Triangle (soft) activation function
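	# Note: numpy.exp2 returns 2**x; the squared-distance expansion ||x - c||^2 = x.x + c.c - 2*x.c
	# would call for element-wise squares (patches**2, centroids**2) in the two sums below.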
	xx = numpy.sum(numpy.exp2(patches), axis=1)
	cc = numpy.sum(numpy.exp2(centroids), axis=1)
	xc = 2*numpy.dot(patches, numpy.transpose(centroids))

	z = numpy.sqrt(cc + (xx - xc))
	mu = z.mean(axis=1)
	patches = numpy.maximum(0, mu-z)

	# Reshape to 2D plane before pooling
	rows = image.shape[0] - patch_width + 1
	cols = image.shape[1] - patch_width + 1
	patches = numpy.array(patches, copy=False).reshape(rows, cols, centroids.shape[0], order="F")

	# Pool
	half_rows = round(rows / 2)
	half_cols = round(cols / 2)

	# Calculate pool values
	q1 = numpy.sum(numpy.sum(patches[1:half_rows, 1:half_cols, :], 0), 0)
	q2 = numpy.sum(numpy.sum(patches[half_rows+1:patches.shape[0], 1:half_cols, :], 0), 0)
	q3 = numpy.sum(numpy.sum(patches[1:half_rows, half_cols+1:patches.shape[1], :], 0), 0)
	q4 = numpy.sum(numpy.sum(patches[half_rows+1:patches.shape[0], half_cols+1:patches.shape[1], :], 0), 0)

	# Print time
	#print "Finished %s, took %.2f seconds" %(path, time() - t)

	output = numpy.transpose(numpy.append(q1, numpy.append(q2, numpy.append(q3, q4))))

	# Put output in queue (so that it is sent to the original thread)
	q.put((p, output))

	# Concatenate and return
	return 0
Author: StevenReitsma, Project: kaggle-galaxyzoo, Lines: 56, Source: extract_features.py


Example 6: decay_gene_ls

def decay_gene_ls(cell1='sphere', cell2='shield', FC_cutoff=2):
	rpkm_dict, df = read_rpkm2()
	decay_genes = []
	for i,j in rpkm_dict.items():
		if j['DMSO_%s'%(cell1)] > 0 and j['DMSO_%s'%(cell2)] > 0:
			fold_change =   np.exp2(j['DMSO_%s'%(cell1)]) / np.exp2(j['DMSO_%s'%(cell2)]) 
			if fold_change >= FC_cutoff:
				decay_genes.append(i)
	print "decay_genes: %s"%(len(decay_genes)), decay_genes[0:5]
	return decay_genes
Author: Tsinghua-gongjing, Project: test, Lines: 10, Source: RBP_predict_with_icshape.py


Example 7: analyze_waittimes

def analyze_waittimes():
    waits = np.loadtxt('results/wait_ratios.csv', delimiter=',')
    threads = np.exp2(np.arange(7))
    color_names = map(lambda x: '{0:d} Vertices'.format(int(x)), np.exp2(np.arange(4,11)))
    colors = ['black', 'violet', 'blue', 'green', 'yellow', 'orange', 'red']

    plt.figure(figsize=(12, 8))
    plots = []
    for i in range(waits.shape[0]):
        plots.append(plt.plot(threads, waits[i], color=colors[i], linestyle='-')[0])

    plt.xscale('log', basex=2)
    plt.yscale('log', basey=2)
    plt.legend(plots, color_names, loc=4)
    plt.savefig('img/waits.png', dpi=200, bbox_inches='tight')
Author: gideonla, Project: w2014, Lines: 15, Source: analyze.py


Example 8: _guess_average_depth

    def _guess_average_depth(self, segments=None, window=100):
        """Estimate the effective average read depth from variance.

        Assume read depths are Poisson distributed, converting log2 values to
        absolute counts. Then the mean depth equals the variance, and the average
        read depth is the estimated mean divided by the estimated variance.
        Use robust estimators (Tukey's biweight location and midvariance) to
        compensate for outliers and overdispersion.

        With `segments`, take the residuals of this array's log2 values from
        those of the segments to remove the confounding effect of real CNVs.

        If `window` is an integer, calculate and subtract a smoothed trendline
        to remove the effect of CNVs without segmentation (skipped if `segments`
        are given).

        See: http://www.evanmiller.org/how-to-read-an-unlabeled-sales-chart.html
        """
        # Try to drop allosomes
        cnarr = self.autosomes()
        if not len(cnarr):
            cnarr = self
        # Remove variations due to real/likely CNVs
        y_log2 = cnarr.residuals(segments)
        if segments is None and window:
            y_log2 -= smoothing.savgol(y_log2, window)
        # Guess Poisson parameter from absolute-scale values
        y = np.exp2(y_log2)
        # ENH: use weight argument to these stats
        loc = descriptives.biweight_location(y)
        spread = descriptives.biweight_midvariance(y, loc)
        if spread > 0:
            return loc / spread**2
        return loc
Author: chapmanb, Project: cnvkit, Lines: 34, Source: cnary.py


Example 9: exp_through_polynomial_fit

def exp_through_polynomial_fit(value, length=12):
    """
        http://jrfonseca.blogspot.com/2008/09/fast-sse2-pow-tables-or-polynomials.html
        assuming powf(x, y) == exp2(log2(x) * y) since x**y = 2**(log2(x**y)) = 2**(y * log2(x))
        then powf(e, y) == exp2(log2(e) * y), log2(e) = 1.442695040888963387004650940071,
        exp2(1.442695040888963387004650940071 * y)
        break apart (1.442695040888963387004650940071 * y) into real and integral,

        IEEE doubles are represented using 64 bits where:
            value = -1**b[63] + (int(b[52:64]) - 1023) + 1 + sum(b[52 - i]/2**i for i in xrange(52))

        since x**(real + integral) => x**real * x**integral
        implement the integral part using fast shifts,
        the real portion will be implemented using a polynomial function ...
        we can further increase the accuracy by reducing the interval from (-1, 1) to (-.5, .5) by:
        taking the square root of each side and then squaring the final answer, Proof:
        (e**x)**0.5 = (2**(x * log2(e)))**0.5, let y = x * log2(e)
        (2**y)**0.5 = (2**(floor(y) + (y - floor(y))))**0.5 = (2**(floor(y)))**0.5 * (2**(y - floor(y)))**0.5
        (2**(y - floor(y)))**0.5 = 2**(0.5 * (y - floor(y)))
        since -1 < y - floor(y) < 1 we have -0.5 < 0.5 * (y - floor(y)) < 0.5
        the final result would simply need to be squared since ((e**x)**0.5)**2 = (e**x)**(2*0.5) = e**x ...
    """
    y = value * 1.442695040888963387004650940071
    integral = numpy.sqrt(numpy.exp2(int(y)))
    return (integral * numpy.polyval(remez(numpy.exp2, (-0.5, 0.5), length), (y - int(y))/2.0))**2
Author: samyvilar, Project: vectorization, Lines: 25, Source: linalg.py


Example 10: test_exp2

 def test_exp2(self):
     from numpy import array, exp2
     inf = float('inf')
     ninf = -float('inf')
     nan = float('nan')
     cmpl = complex
     for c, rel_err in (('complex64', 2e-7), ('complex128', 2e-15), ('clongdouble', 2e-15)):
         a = [cmpl(-5., 0), cmpl(-5., -5.), cmpl(-5., 5.),
                    cmpl(0., -5.), cmpl(0., 0.), cmpl(0., 5.),
                    cmpl(-0., -5.), cmpl(-0., 0.), cmpl(-0., 5.),
                    cmpl(-0., -0.), cmpl(inf, 0.), cmpl(inf, 5.),
                    cmpl(inf, -0.), cmpl(ninf, 0.), cmpl(ninf, 5.),
                    cmpl(ninf, -0.), cmpl(ninf, inf), cmpl(inf, inf),
                    cmpl(ninf, ninf), cmpl(5., inf), cmpl(5., ninf),
                    cmpl(nan, 5.), cmpl(5., nan), cmpl(nan, nan),
                  ]
         b = exp2(array(a,dtype=c))
         for i in range(len(a)):
             try:
                 res = self.c_pow((2,0), (a[i].real, a[i].imag))
             except OverflowError:
                 res = (inf, nan)
             except ValueError:
                 res = (nan, nan)
             msg = 'result of 2**%r(%r) got %r expected %r\n ' % \
                         (c,a[i], b[i], res)
             # cast untranslated boxed results to float,
             # does no harm when translated
             t1 = float(res[0])
             t2 = float(b[i].real)
             self.rAlmostEqual(t1, t2, rel_err=rel_err, msg=msg)
             t1 = float(res[1])
             t2 = float(b[i].imag)
             self.rAlmostEqual(t1, t2, rel_err=rel_err, msg=msg)
Author: abhinavthomas, Project: pypy, Lines: 34, Source: test_complex.py


Example 11: computeActivity

    def computeActivity(self, inputActivity):
        logger.debug('computing activity.')
        self.ensureLength(inputActivity.max())

        # numpy array magic
        idx = numpy.mgrid[0:self.dims[0], 0:self.dims[1], 0:self.dims[2]]
        tInputActivity = numpy.tile(inputActivity, self.dims[:-1] + (1,))
        factors = 2 * self.counts[idx[0],idx[1],idx[2],tInputActivity] / numpy.sum(self.counts, axis=3)
        mans,exps = numpy.frexp(factors)
        mantissas, exponents = numpy.frexp(numpy.prod(mans, axis=2))
        exponents += exps.sum(axis=2)

        if self.maxexp is not None:
            maxexp = self.maxexp
        else:
            maxexp = exponents.max()

        exponents -= maxexp
        logger.debug("Maximum exponent: %d", maxexp)
        activity = mantissas * numpy.exp2(exponents)

        if self.p != 0:
            conscience = (self.coff / self.con)**self.p
            activity *= conscience

        activity *= numpy.prod(activity.shape) / activity.sum()
        return activity
Author: tatome, Project: bauer_et_al_2015, Lines: 27, Source: network.py


Example 12: transfer_fields

def transfer_fields(segments, cnarr, ignore=params.IGNORE_GENE_NAMES):
    """Map gene names, weights, depths from `cnarr` bins to `segarr` segments.

    Segment gene name is the comma-separated list of bin gene names. Segment
    weight is the sum of bin weights, and depth is the (weighted) mean of bin
    depths.
    """
    if not len(cnarr):
        return [], [], []

    ignore += params.ANTITARGET_ALIASES
    if 'weight' not in cnarr:
        cnarr['weight'] = 1
    if 'depth' not in cnarr:
        cnarr['depth'] = np.exp2(cnarr['log2'])
    seggenes = ['-'] * len(segments)
    segweights = np.zeros(len(segments))
    segdepths = np.zeros(len(segments))
    for i, (_seg, subprobes) in enumerate(cnarr.by_ranges(segments)):
        if not len(subprobes):
            continue
        segweights[i] = subprobes['weight'].sum()
        if subprobes['weight'].sum() > 0:
            segdepths[i] = np.average(subprobes['depth'], weights=subprobes['weight'])
        subgenes = [g for g in pd.unique(subprobes['gene']) if g not in ignore]
        if subgenes:
            seggenes[i] = ",".join(subgenes)
    return seggenes, segweights, segdepths
Author: JimmyLiJing, Project: cnvkit, Lines: 28, Source: __init__.py


Example 13: plot_relrisk_matrix

    def plot_relrisk_matrix(relrisk):
        t = relrisk.copy()
        matrix_shape = (t['exposure'].nunique(), t['event'].nunique())
        m = ut.daf.to.map_vals_to_ints_inplace(t, cols_to_map=['exposure'])
        m = m['exposure']
        ut.daf.to.map_vals_to_ints_inplace(t, cols_to_map={'event': dict(zip(m, range(len(m))))})
        RR = zeros(matrix_shape)
        RR[t['exposure'], t['event']] = t['relative_risk']
        RR[range(len(m)), range(len(m))] = nan

        RRL = np.log2(RR)
        def normalizor(X):
            min_x = nanmin(X)
            range_x = nanmax(X) - min_x
            return lambda x: (x - min_x) / range_x
        normalize_this = normalizor(RRL)
        center = normalize_this(0)



        color_map = shifted_color_map(cmap=cm.get_cmap('coolwarm'), start=0, midpoint=center, stop=1)
        imshow(RRL, cmap=color_map, interpolation='none');

        xticks(range(shape(RRL)[0]), m, rotation=90)
        yticks(range(shape(RRL)[1]), m)
        cbar = colorbar()
        cbar.ax.set_yticklabels(["%.02f" % x for x in np.exp2(array(ut.pplot.get.get_colorbar_tick_labels_as_floats(cbar)))])
Author: yz-, Project: ut, Lines: 27, Source: pot.py


Example 14: make_raw_outputFile

def make_raw_outputFile(data, probe_names, sample_names, adv=False, use_log=False, celltypes_to_use=None, filename='rawData.txt'):
	'''Write the data to a file.
		If advanced mode is on, only the given samples/cell types are printed, by reducing the input data.
		Input: data, probe_names and sample_names must share the same reduced indexing, so that only the wanted probes are written, in a consistent index order.
	'''
	
	outfile=open(filename, 'w')
	
	if not use_log:
		data = numpy.exp2(data)
	
	
	if adv:
		temp_index=0
		reduced_samplenames=sample_names
		for i in sample_names:
			if not i in celltypes_to_use:
				data = numpy.delete(data, temp_index, 1)
				reduced_samplenames = numpy.delete(reduced_samplenames, temp_index, 0)
			else:
				temp_index += 1
		
		sample_names=reduced_samplenames

	
	outfile.write(',')
	outfile.write(', '.join(sample_names))
	outfile.write('\n')
	for index,i in enumerate(probe_names):
		outfile.write(i)
		outfile.write(', ')
		for y in data[index]:
			outfile.write('%s,'%(y))
		outfile.write('\n')
Author: frederikbagger, Project: JARID2, Lines: 34, Source: JARID2.py


Example 15: test_sfu

def test_sfu():
    X = np.random.uniform(0, 1, 16).astype('float32')
    Y = run_code(sfu, X, 4)
    assert np.allclose(1/X, Y[0], rtol=1e-4)
    assert np.allclose(1/np.sqrt(X), Y[1], rtol=1e-4)
    assert np.allclose(np.exp2(X), Y[2], rtol=1e-4)
    assert np.allclose(np.log2(X), Y[3], rtol=1e-2)
Author: MaveriQ, Project: py-videocore, Lines: 7, Source: test_sfu.py


Example 16: __init__

 def __init__(self, dataset, ranking_size, allow_repetitions):
     Metric.__init__(self, dataset, ranking_size)
     self.discountParams=1.0+numpy.array(range(self.rankingSize), dtype=numpy.float64)
     self.discountParams[0]=2.0
     self.discountParams[1]=2.0
     self.discountParams=numpy.reciprocal(numpy.log2(self.discountParams))
     self.name='NDCG'
     
     self.normalizers=[]
     numQueries=len(self.dataset.docsPerQuery)
     for currentQuery in range(numQueries):
         validDocs=min(self.dataset.docsPerQuery[currentQuery], ranking_size)
         currentRelevances=self.dataset.relevances[currentQuery]
         
         #Handle filtered datasets properly
         if self.dataset.mask is not None:
             currentRelevances=currentRelevances[self.dataset.mask[currentQuery]]
         
         maxRelevances=None
         if allow_repetitions:
             maxRelevances=numpy.repeat(currentRelevances.max(), validDocs)
         else:
             maxRelevances=-numpy.sort(-currentRelevances)[0:validDocs]
     
         maxGain=numpy.exp2(maxRelevances)-1.0
         maxDCG=numpy.dot(self.discountParams[0:validDocs], maxGain)
         
         self.normalizers.append(maxDCG)
         
         if currentQuery % 1000==0:
             print(".", end="", flush=True)
             
     print("", flush=True)        
     print("NDCG:init [INFO] RankingSize", ranking_size, "\t AllowRepetitions?", allow_repetitions, flush=True)
Author: slee1009, Project: slates_semisynth_expts, Lines: 34, Source: Metrics.py
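
For reference, the gain/discount convention used here is the common one: gain(rel) = 2**rel - 1 and discount(rank) = 1/log2(rank + 1), with the snippet additionally pinning the first two ranks to a full discount of 1. A small stand-alone DCG/NDCG computation in that convention, using hypothetical relevance grades and none of the dataset classes above:

import numpy as np

relevances = np.array([3.0, 2.0, 3.0, 0.0, 1.0])               # hypothetical grades, in ranked order
gains = np.exp2(relevances) - 1.0                              # 2**rel - 1
discounts = 1.0 / np.log2(np.arange(2, relevances.size + 2))   # 1/log2(rank + 1), rank starting at 1
dcg = np.dot(discounts, gains)
idcg = np.dot(discounts, np.exp2(np.sort(relevances)[::-1]) - 1.0)
print(dcg / idcg)                                              # NDCG, about 0.957 for these grades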


Example 17: exponential_grid

def exponential_grid(fitness_func, parameters):
    """Exponential parameter optimization that checks all possible values

    Values are visited in a grid order, linear in log space with the step being
    the log of the resolution.  Take care not to use 0 as it will cause
    problems when taking the log.

    If a parameter's bounds are (.1, 10**5, 10), then the values used are
    [10**-2, 10**-1, 10**1, 10**2, 10**3, 10**4]

    Args:
        fitness_func: Fitness function that takes keyword arguments whose values
            are keys in 'parameters'.  Each keyword argument takes a float.
            The fitness function returns a float that we seek to maximize.
        parameters: Dict with keys as parameter names and values as
            (low, high, resolution) where generated parameters are [low, high)
            and resolution is a hint at the relevant scale of the parameter.

    Yields:
        Iterator of (fitness, params) where
        fitness: The value returned by the fitness_func given params
        params: Dict whose keys are those in parameters and values are floats
    """
    ranges = [np.exp2(np.arange(*np.log2(x))) for x in parameters.values()]
    for param_values in itertools.product(*ranges):
        params = dict(zip(parameters, param_values))
        yield fitness_func(**params), params
Author: bwhite, Project: pyram, Lines: 27, Source: __init__.py
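
As a rough usage sketch of what one parameter's grid looks like (hypothetical bounds; the exact endpoint can drift by one step because np.arange on floats is only exclusive up to rounding):

import numpy as np

low, high, resolution = 0.1, 10**5, 10
values = np.exp2(np.arange(np.log2(low), np.log2(high), np.log2(resolution)))
print(values)    # roughly [0.1, 1, 10, 100, 1000, 10000]; endpoint may vary with rounding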


Example 18: calculate_tmm_norm_factor

def calculate_tmm_norm_factor(ref, sample, trim_m=.3, trim_a=.05):
    if np.abs(ref - sample).sum() < 1e-10:
        return 1.

    zero_positions = ((ref == 0) | (sample == 0))

    ref_nonzero = ref[~zero_positions]
    sample_nonzero = sample[~zero_positions]
    log_ref_nonzero = np.log2(ref_nonzero)
    log_sample_nonzero = np.log2(sample_nonzero)

    M = log_sample_nonzero - log_ref_nonzero
    A = (log_sample_nonzero + log_ref_nonzero) / 2

    readsum_ref = ref_nonzero.sum()
    readsum_sample = sample_nonzero.sum()
    weights = 1. / ((readsum_ref - ref_nonzero) / (readsum_ref * ref_nonzero) +
                    (readsum_sample - sample_nonzero) / (readsum_sample * sample_nonzero))

    M_trim_min, M_trim_max = M.quantile([trim_m, 1 - trim_m])
    A_trim_min, A_trim_max = A.quantile([trim_a, 1 - trim_a])

    trimming_mask = ((M > M_trim_min) & (M < M_trim_max) &
                     (A > A_trim_min) & (A < A_trim_max))
    M_trimmed = M[trimming_mask]
    weights_trimmed = weights[trimming_mask]

    return np.exp2((M_trimmed * weights_trimmed).sum() / weights_trimmed.sum())
Author: hyeshik, Project: rnarry, Lines: 28, Source: tmm-normalize.py


Example 19: gaussPL

def gaussPL(y, rPerp, thetaV, phi, sig, kap):
    """
    Define the gaussian power-law energy profile. This profile is defined by:
            [2^(-x/sig)]^(2*kap),
    where x will be thetaPrime. This profile is a basic gaussian raised to a 
    power-law component (kap) and adjusted such that sig = FWHM.
    
    y [0-1]: The scaled variable in the radial direction. y := R/Rl.
    
    rPerp [0-1]: The perpendicular distance from the LOS in scaled units of Rl.
        Through testing it should not generally be greater than 0.2.
    
    thetaV [0-~pi/4]: The viewing angle, in radians, between the LOS to observer
        and the jet emission axis.
    
    phi [0-pi]: Interior angle of spherical triangle. phi = 0 corresponds to the
        direction toward the main axis from the LOS.
    
    sig : The angular scale (width) of the profile. This value defines the FWHM.
    
    kap : Power-law index on the profile. kap = 0 defines a flat profile. kap < 1
    defines a sharper profile (higher kurtosis). kap > 1 tends toward a Heaviside.
    
    """
    func = np.divide(np.power(thetaPrime(y, rPerp, thetaV, phi), 2.0 * kap), np.power(sig, 2.0 * kap))

    return np.exp2(-func)
Author: nrfrank, Project: grba_sims, Lines: 27, Source: grbasims.py


Example 20: score

    def score(self, X, y=None):
        """Compute score reflecting how well the model has fitted for the input data.

        The scoring method is set using the `scorer` argument in :meth:`~gensim.sklearn_api.ldamodel.LdaTransformer`.
        Higher score is better.

        Parameters
        ----------
        X : iterable of list of (int, number)
            Sequence of documents in BOW format.

        Returns
        -------
        float
            The score computed based on the selected method.

        """
        if self.scorer == 'perplexity':
            corpus_words = sum(cnt for document in X for _, cnt in document)
            subsample_ratio = 1.0
            perwordbound = \
                self.gensim_model.bound(X, subsample_ratio=subsample_ratio) / (subsample_ratio * corpus_words)
            return -1 * np.exp2(-perwordbound)  # returning (-1*perplexity) to select model with minimum value
        elif self.scorer == 'u_mass':
            goodcm = models.CoherenceModel(model=self.gensim_model, corpus=X, coherence=self.scorer, topn=3)
            return goodcm.get_coherence()
        else:
            raise ValueError("Invalid value {} supplied for `scorer` param".format(self.scorer))
Author: RaRe-Technologies, Project: gensim, Lines: 28, Source: ldamodel.py
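
The perplexity branch uses the standard relation perplexity = 2**(-per-word log2 likelihood); the method then negates the perplexity so that "higher is better" holds for model selection. A toy illustration of the relation, independent of gensim:

import numpy as np

word_probs = np.full(4, 0.25)                    # a model assigning probability 1/4 to every word
per_word_log2 = np.mean(np.log2(word_probs))     # average log2 likelihood per word
print(np.exp2(-per_word_log2))                   # 4.0: perplexity equals vocabulary size for a uniform model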



Note: The numpy.exp2 examples in this article were compiled by 纯净天空 from source code and documentation hosted on GitHub, MSDocs, and similar platforms. The snippets are selected from open-source projects contributed by their respective authors; copyright remains with the original authors, and distribution and use should follow each project's License. Do not reproduce without permission.

