Python functions.rand function code examples


This article collects typical usage examples of Python's pyspark.sql.functions.rand function. If you are unsure what rand does, how to call it, or what real-world code using it looks like, the curated examples below should help.



Twelve code examples of the rand function are shown below, ordered by popularity by default.
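As a quick orientation before the collected examples, here is a minimal, self-contained sketch of what rand produces; the app name, seed, and column name are illustrative and not taken from any of the examples below.

from pyspark.sql import SparkSession
from pyspark.sql.functions import rand

spark = SparkSession.builder.master("local[1]").appName("rand-demo").getOrCreate()

# rand(seed) adds a column of independent uniform random values in [0.0, 1.0);
# passing a seed makes the generated column reproducible across runs.
df = spark.range(0, 5).withColumn("uniform", rand(seed=42))
df.show()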

Example 1: _fit

    def _fit(self, dataset):
        est = self.getOrDefault(self.estimator)
        epm = self.getOrDefault(self.estimatorParamMaps)
        numModels = len(epm)
        eva = self.getOrDefault(self.evaluator)
        tRatio = self.getOrDefault(self.trainRatio)
        seed = self.getOrDefault(self.seed)
        randCol = self.uid + "_rand"
        df = dataset.select("*", rand(seed).alias(randCol))
        condition = (df[randCol] >= tRatio)
        validation = df.filter(condition).cache()
        train = df.filter(~condition).cache()

        def singleTrain(paramMap):
            model = est.fit(train, paramMap)
            metric = eva.evaluate(model.transform(validation, paramMap))
            return metric

        pool = ThreadPool(processes=min(self.getParallelism(), numModels))
        metrics = pool.map(singleTrain, epm)
        train.unpersist()
        validation.unpersist()

        if eva.isLargerBetter():
            bestIndex = np.argmax(metrics)
        else:
            bestIndex = np.argmin(metrics)
        bestModel = est.fit(dataset, epm[bestIndex])
        return self._copyValues(TrainValidationSplitModel(bestModel, metrics))
Developer: Altiscale | Project: spark | Lines of code: 29 | Source file: tuning.py
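Example 1 is the internal _fit of Spark's TrainValidationSplit, which hides the rand column from the caller. Below is a hedged usage sketch of how that class is typically driven; the estimator, grid values, and the `dataset` DataFrame are illustrative assumptions.

from pyspark.ml.classification import LogisticRegression
from pyspark.ml.evaluation import BinaryClassificationEvaluator
from pyspark.ml.tuning import ParamGridBuilder, TrainValidationSplit

lr = LogisticRegression(featuresCol="features", labelCol="label")
grid = ParamGridBuilder().addGrid(lr.regParam, [0.01, 0.1]).build()
tvs = TrainValidationSplit(estimator=lr,
                           estimatorParamMaps=grid,
                           evaluator=BinaryClassificationEvaluator(),
                           trainRatio=0.8,
                           seed=7)
# `dataset` is assumed to be a DataFrame with "features" and "label" columns;
# fit() adds a rand(seed) column internally to carve out the validation rows.
model = tvs.fit(dataset)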


Example 2: _fit

    def _fit(self, dataset):
        est = self.getOrDefault(self.estimator)
        epm = self.getOrDefault(self.estimatorParamMaps)
        numModels = len(epm)
        eva = self.getOrDefault(self.evaluator)
        nFolds = self.getOrDefault(self.numFolds)
        seed = self.getOrDefault(self.seed)
        h = 1.0 / nFolds
        randCol = self.uid + "_rand"
        df = dataset.select("*", rand(seed).alias(randCol))
        metrics = [0.0] * numModels
        for i in range(nFolds):
            validateLB = i * h
            validateUB = (i + 1) * h
            condition = (df[randCol] >= validateLB) & (df[randCol] < validateUB)
            validation = df.filter(condition)
            train = df.filter(~condition)
            for j in range(numModels):
                model = est.fit(train, epm[j])
                # TODO: duplicate evaluator to take extra params from input
                metric = eva.evaluate(model.transform(validation, epm[j]))
                metrics[j] += metric/nFolds

        if eva.isLargerBetter():
            bestIndex = np.argmax(metrics)
        else:
            bestIndex = np.argmin(metrics)
        bestModel = est.fit(dataset, epm[bestIndex])
        return self._copyValues(CrossValidatorModel(bestModel, metrics))
Developer: 76402 | Project: spark | Lines of code: 29 | Source file: tuning.py
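Example 2 is the CrossValidator counterpart of the previous splitter. A hedged, self-contained usage sketch follows; as before, the estimator, grid, and `dataset` are illustrative assumptions.

from pyspark.ml.classification import LogisticRegression
from pyspark.ml.evaluation import BinaryClassificationEvaluator
from pyspark.ml.tuning import CrossValidator, ParamGridBuilder

lr = LogisticRegression()
grid = ParamGridBuilder().addGrid(lr.regParam, [0.01, 0.1]).build()
cv = CrossValidator(estimator=lr,
                    estimatorParamMaps=grid,
                    evaluator=BinaryClassificationEvaluator(),
                    numFolds=3,
                    seed=7)
# fit() buckets the internal rand(seed) column into numFolds half-open
# intervals and holds each bucket out once as the validation fold.
cvModel = cv.fit(dataset)  # `dataset` is assumed to have features/label columns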


Example 3: _fit

    def _fit(self, dataset):
        est = self.getOrDefault(self.estimator)
        epm = self.getOrDefault(self.estimatorParamMaps)
        numModels = len(epm)
        eva = self.getOrDefault(self.evaluator)
        tRatio = self.getOrDefault(self.trainRatio)
        seed = self.getOrDefault(self.seed)
        randCol = self.uid + "_rand"
        df = dataset.select("*", rand(seed).alias(randCol))
        condition = (df[randCol] >= tRatio)
        validation = df.filter(condition).cache()
        train = df.filter(~condition).cache()

        subModels = None
        collectSubModelsParam = self.getCollectSubModels()
        if collectSubModelsParam:
            subModels = [None for i in range(numModels)]

        tasks = _parallelFitTasks(est, train, eva, validation, epm, collectSubModelsParam)
        pool = ThreadPool(processes=min(self.getParallelism(), numModels))
        metrics = [None] * numModels
        for j, metric, subModel in pool.imap_unordered(lambda f: f(), tasks):
            metrics[j] = metric
            if collectSubModelsParam:
                subModels[j] = subModel

        train.unpersist()
        validation.unpersist()

        if eva.isLargerBetter():
            bestIndex = np.argmax(metrics)
        else:
            bestIndex = np.argmin(metrics)
        bestModel = est.fit(dataset, epm[bestIndex])
        return self._copyValues(TrainValidationSplitModel(bestModel, metrics, subModels))
Developer: BaiBenny | Project: spark | Lines of code: 35 | Source file: tuning.py


Example 4: fit

    def fit(self, dataset, params={}):
        paramMap = self.extractParamMap(params)
        est = paramMap[self.estimator]
        epm = paramMap[self.estimatorParamMaps]
        numModels = len(epm)
        eva = paramMap[self.evaluator]
        nFolds = paramMap[self.numFolds]
        h = 1.0 / nFolds
        randCol = self.uid + "_rand"
        df = dataset.select("*", rand(0).alias(randCol))
        metrics = np.zeros(numModels)
        for i in range(nFolds):
            validateLB = i * h
            validateUB = (i + 1) * h
            condition = (df[randCol] >= validateLB) & (df[randCol] < validateUB)
            validation = df.filter(condition)
            train = df.filter(~condition)
            for j in range(numModels):
                model = est.fit(train, epm[j])
                # TODO: duplicate evaluator to take extra params from input
                metric = eva.evaluate(model.transform(validation, epm[j]))
                metrics[j] += metric
        bestIndex = np.argmax(metrics)
        bestModel = est.fit(dataset, epm[bestIndex])
        return CrossValidatorModel(bestModel)
Developer: AllenWeb | Project: spark | Lines of code: 25 | Source file: tuning.py


Example 5: test_rand_functions

    def test_rand_functions(self):
        df = self.df
        from pyspark.sql import functions
        rnd = df.select('key', functions.rand()).collect()
        for row in rnd:
            assert row[1] >= 0.0 and row[1] <= 1.0, "got: %s" % row[1]
        rndn = df.select('key', functions.randn(5)).collect()
        for row in rndn:
            assert row[1] >= -4.0 and row[1] <= 4.0, "got: %s" % row[1]
Developer: uncleGen | Project: ps-on-spark | Lines of code: 9 | Source file: tests.py


Example 6: test_rand_functions

    def test_rand_functions(self):
        df = self.df
        from pyspark.sql import functions
        rnd = df.select('key', functions.rand()).collect()
        for row in rnd:
            assert row[1] >= 0.0 and row[1] <= 1.0, "got: %s" % row[1]
        rndn = df.select('key', functions.randn(5)).collect()
        for row in rndn:
            assert row[1] >= -4.0 and row[1] <= 4.0, "got: %s" % row[1]

        # If the specified seed is 0, we should use it.
        # https://issues.apache.org/jira/browse/SPARK-9691
        rnd1 = df.select('key', functions.rand(0)).collect()
        rnd2 = df.select('key', functions.rand(0)).collect()
        self.assertEqual(sorted(rnd1), sorted(rnd2))

        rndn1 = df.select('key', functions.randn(0)).collect()
        rndn2 = df.select('key', functions.randn(0)).collect()
        self.assertEqual(sorted(rndn1), sorted(rndn2))
Developer: EugenCepoi | Project: spark | Lines of code: 19 | Source file: tests.py


Example 7: split_data

def split_data(frame, num_folds, tc=TkContext.implicit):
    """
    Randomly split data based on num_folds specified. Implementation logic borrowed from pyspark.
    :param frame: The frame to be split into train and validation frames
    :param num_folds: Number of folds to be split into
    :param tc: spark-tk context passed implicitly
    :return: train frame and test frame for each fold
    """
    from pyspark.sql.functions import rand
    df = frame.dataframe
    h = 1.0/num_folds
    rand_col = "rand_1"
    df_indexed = df.select("*", rand(0).alias(rand_col))
    for i in range(num_folds):
        test_lower_bound = i*h
        test_upper_bound = (i+1)*h
        condition = (df_indexed[rand_col] >= test_lower_bound) & (df_indexed[rand_col] < test_upper_bound)
        test_df = df_indexed.filter(condition)
        train_df = df_indexed.filter(~condition)
        train_frame = tc.frame.create(train_df)
        test_frame = tc.frame.create(test_df)
        yield train_frame, test_frame
Developer: Haleyo | Project: spark-tk | Lines of code: 22 | Source file: cross_validate.py
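The generator above depends on the spark-tk frame wrapper. As an aside, the same fold logic can be sketched with plain PySpark DataFrames; the helper below is illustrative and not part of spark-tk.

from pyspark.sql.functions import rand

def k_fold_dataframes(df, num_folds=3, seed=0, rand_col="_fold_rand"):
    # Each fold keeps rows whose rand value falls in [i/k, (i+1)/k)
    # as the test set and uses the complement as the training set.
    h = 1.0 / num_folds
    indexed = df.select("*", rand(seed).alias(rand_col))
    for i in range(num_folds):
        in_fold = (indexed[rand_col] >= i * h) & (indexed[rand_col] < (i + 1) * h)
        yield indexed.filter(~in_fold).drop(rand_col), indexed.filter(in_fold).drop(rand_col)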


Example 8: _transform

    def _transform(self, dataset):
        return dataset.withColumn("prediction",
                                  dataset.feature + (rand(0) * self.getInducedError()))
Developer: Bella-Lin | Project: spark | Lines of code: 3 | Source file: tests.py


Example 9: spark_stratified_split

def spark_stratified_split(
    data,
    ratio=0.75,
    min_rating=1,
    filter_by="user",
    col_user=DEFAULT_USER_COL,
    col_item=DEFAULT_ITEM_COL,
    col_rating=DEFAULT_RATING_COL,
    seed=42,
):
    """Spark stratified splitter
    For each user / item, the split function takes proportions of ratings which is
    specified by the split ratio(s). The split is stratified.

    Args:
        data (spark.DataFrame): Spark DataFrame to be split.
        ratio (float or list): Ratio for splitting data. If it is a single float number
            it splits data into two halves and the ratio argument indicates the ratio of
            training data set; if it is a list of float numbers, the splitter splits
            data into several portions corresponding to the split ratios. If a list is
            provided and the ratios do not sum to 1, they will be normalized.
            Splits are returned in the same order as the ratios
            (e.g. split[0] corresponds to the first ratio in the list).
        seed (int): Seed.
        min_rating (int): minimum number of ratings for user or item.
        filter_by (str): either "user" or "item", depending on which of the two is to filter
            with min_rating.
        col_user (str): column name of user IDs.
        col_item (str): column name of item IDs.

    Returns:
        list: Splits of the input data as spark.DataFrame.
    """
    if not (filter_by == "user" or filter_by == "item"):
        raise ValueError("filter_by should be either 'user' or 'item'.")

    if min_rating < 1:
        raise ValueError("min_rating should be integer and larger than or equal to 1.")

    multi_split, ratio = process_split_ratio(ratio)

    split_by_column = col_user if filter_by == "user" else col_item

    if min_rating > 1:
        data = min_rating_filter_spark(
            data,
            min_rating=min_rating,
            filter_by=filter_by,
            col_user=col_user,
            col_item=col_item,
        )

    ratio = ratio if multi_split else [ratio, 1 - ratio]
    ratio_index = np.cumsum(ratio)

    window_spec = Window.partitionBy(split_by_column).orderBy(rand(seed=seed))

    rating_grouped = (
        data.groupBy(split_by_column)
        .agg({col_rating: "count"})
        .withColumnRenamed("count(" + col_rating + ")", "count")
    )
    rating_all = data.join(broadcast(rating_grouped), on=split_by_column)

    rating_rank = rating_all.withColumn(
        "rank", row_number().over(window_spec) / col("count")
    )

    splits = []
    for i, _ in enumerate(ratio_index):
        if i == 0:
            rating_split = rating_rank.filter(col("rank") <= ratio_index[i])
        else:
            rating_split = rating_rank.filter(
                (col("rank") <= ratio_index[i]) & (col("rank") > ratio_index[i - 1])
            )

        splits.append(rating_split)

    return splits
Developer: David-Li-L | Project: recommenders | Lines of code: 80 | Source file: spark_splitters.py
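A hedged calling sketch for the splitter above; the `ratings` DataFrame and the column names are placeholders (in the recommenders project the defaults come from DEFAULT_USER_COL and friends).

# `ratings` is assumed to be a Spark DataFrame of (user, item, rating) rows.
train, test = spark_stratified_split(
    ratings,
    ratio=0.75,
    col_user="userID",
    col_item="itemID",
    col_rating="rating",
    seed=42,
)

Because the per-group ordering comes from rand(seed=seed) inside the window, the same seed reproduces the same train/test membership for every user.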


Example 10: SparkConf

from pyspark.sql.functions import rand, randn, mean, min, max
from pyspark.sql import SQLContext
from pyspark import SparkConf, SparkContext

conf = SparkConf().setMaster("local").setAppName("sparkDataFrame")
sc = SparkContext(conf = conf)
sqlcontext = SQLContext(sc)

# 1. Create a DataFrame with one int column and 10 rows.
df = sqlcontext.range(0, 10)
df.show()

# Generate two other columns using uniform distribution and normal distribution.
df.select("id", rand(seed=10).alias("uniform"), randn(seed=27).alias("normal"))
df.show()

# 2. Summary and Descriptive Statistics
df = sqlcontext.range(0, 10).withColumn('uniform', rand(seed=10)).withColumn('normal', randn(seed=27))
df.describe('uniform', 'normal').show()

df.select([mean('uniform'), min('uniform'), max('uniform')]).show()

# 3. Sample covariance and correlation
# Covariance is a measure of how two variables change with respect to each other. 
# A positive number would mean that there is a tendency that as one variable increases, 
# the other increases as well. 
# A negative number would mean that as one variable increases, 
# the other variable has a tendency to decrease.
df = sqlcontext.range(0, 10).withColumn('rand1', rand(seed=10)).withColumn('rand2', rand(seed=27))
print(df.stat.cov('rand1', 'rand2'))  # close to zero: the two columns are generated independently
print(df.stat.cov('id', 'id'))        # sample variance of the id column
Developer: xialei | Project: sparkme | Lines of code: 31 | Source file: sparkDataFrame.py
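The comment block in Example 10 introduces both covariance and correlation, but the code stops at covariance. A hedged continuation using the same df is sketched below; df.stat.corr is a standard DataFrame method.

# Pearson correlation of two independently generated uniform columns
# should be close to 0, while a column correlates perfectly with itself.
print(df.stat.corr('rand1', 'rand2'))
print(df.stat.corr('id', 'id'))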


Example 11: display

# COMMAND ----------

from pyspark.sql.functions import rand, randn
# Create a DataFrame with one int column and 10 rows.
df = sqlContext.range(0, 10)
df.show()

# COMMAND ----------

display(df)

# COMMAND ----------

# Generate two other columns using uniform distribution and normal distribution.
df.select("id", rand(seed=10).alias("uniform"), randn(seed=27).alias("normal")).show()


# COMMAND ----------

display(df.select("id", rand(seed=10).alias("uniform"), randn(seed=27).alias("normal")))

# COMMAND ----------

# MAGIC %md ### Summary and Descriptive Statistics
# MAGIC 
# MAGIC The first operation to perform after importing data is to get some sense of what it looks like. For numerical columns, knowing the descriptive summary statistics can help a lot in understanding the distribution of your data. The function `describe` returns a DataFrame containing information such as number of non-null entries (count), mean, standard deviation, and minimum and maximum value for each numerical column.

# COMMAND ----------

from pyspark.sql.functions import rand, randn
Developer: dennyglee | Project: databricks | Lines of code: 30 | Source file: Statistical+and+Mathematical+Functions+with+DataFrames+in+Spark.py
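The notebook excerpt above is cut off right after the import, before the describe call its markdown cell announces. A hedged continuation sketch, reusing the Databricks-provided sqlContext and illustrative seeds:

# describe() returns count, mean, stddev, min, and max for each numeric column.
df = sqlContext.range(0, 10).withColumn('uniform', rand(seed=10)).withColumn('normal', randn(seed=27))
df.describe('uniform', 'normal').show()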


Example 12: combine_matrix

def combine_matrix(X, y, top = 4):
    """Create the data matrix for predictive modeling

    Notes: The default top n number is 4

    Args:
        X(SparkSQL DataFrame):
        y(SparkSQL DataFrame):

    Return:
        matrixAll(SparkSQL DataFrame):

    """
    # logging.info('Creating the big matrix X:y...')
    # y = hc.createDataFrame(y)
    ### Change y's column name 'serial_number' to 'SN'
    y = y.withColumnRenamed('serial_number', 'SN')
    ### Join X and y on serial_number, SN
    ### Add a new column 'y' specify return (1) or pass (0)
    matrixAll = (X.join(y, X.serial_number == y.SN, how = 'left_outer')
                  .withColumn('y', y['SN'].isNotNull().cast('int')))

    # matrixAll.cache()
    ### Drop row that has null values
    matrixAllDropNa = matrixAll.dropna(how = 'any')
    
    # matrixAllDropNa.cache()
    print('to pandas()')
    symptomLocationPdf = matrixAllDropNa[['check_in_code', 'fail_location']].toPandas()
    print('complete toPandas()')
    # locationPdf = matrixAllDropNa[['fail_location']].toPandas()
    #return symptomPdf
    #return matrixAllDropNa, matrixAll
    
    codeSeries = symptomLocationPdf['check_in_code'].value_counts()
    #print codeSeries
    locationSeries = symptomLocationPdf['fail_location'].value_counts()
    ### Top N = 5 symptoms
    codeDict = {}
    locationDict = {}
    for i in range(top):
        # top n check in codes
        code = codeSeries.index[i]
        #codeLabel = 'code_{}'.format(i)
        codeLabel = '{}'.format(code)
        codeDict[code] = codeSeries[i]
        print('top {} symptom: {}, count: {}'.format(i+1, code, codeSeries[i]))
        matrixAll = (matrixAll.withColumn(codeLabel, (matrixAll['check_in_code'].like('%{}'.format(code))).cast('int'))
                              .fillna({codeLabel: 0}))

        # top n fail locations
        location = locationSeries.index[i]
        #locationLabel = 'location_{}'.format(i)
        locationLabel = '{}'.format(location)
        locationDict[location] = locationSeries[i]
        #print location
        print('top {} fail location: {}, count: {}'.format(i+1, location, locationSeries[i]))
        matrixAll = (matrixAll.withColumn(locationLabel, (matrixAll['fail_location'].like('%{}'.format(location))).cast('int'))
                              .fillna({locationLabel: 0}))

    # add a random column, used later for sampling training rows
    matrixAllRandDf = matrixAll.withColumn('random', rand())

    # transform the float random number into an integer between 0 and 99
    matrixAllIntDf = matrixAllRandDf.withColumn('randInt', (matrixAllRandDf.random * 100).cast('int'))
    
    # cache the whole matrix table
    matrixAllIntDf.cache()
    
    return matrixAllIntDf
Developer: kuanliang | Project: return-products | Lines of code: 70 | Source file: Transform.py
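combine_matrix leaves the sampling itself to later code. A hedged sketch of how the randInt column it adds could drive an approximate 80/20 split; the threshold and variable names are illustrative.

# `matrix_df` is assumed to be the DataFrame returned by combine_matrix.
train_df = matrix_df.filter(matrix_df.randInt < 80)    # roughly 80% of rows
test_df = matrix_df.filter(matrix_df.randInt >= 80)    # remaining ~20%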



Note: The pyspark.sql.functions.rand examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets are taken from open-source projects and remain the copyright of their original authors; consult each project's License before redistributing or reusing the code. Do not reproduce this compilation without permission.

