
Python client.Client Class Code Examples


This article collects typical usage examples of the Python class snakebite.client.Client. If you have been wondering what exactly the Client class does, how to use it, or what real code built on it looks like, the hand-picked class examples below should help.



A total of 20 Client code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
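Before the examples, here is a minimal sketch of the pattern they all share (the hostname and port below are placeholders; as in the snippets that follow, the code is Python 2, and most Client methods accept a list of paths and return a generator of result dictionaries):

    from snakebite.client import Client

    # connect to the HDFS namenode over its RPC port (placeholder values)
    client = Client('namenode.example.com', 8020)

    # ls() takes a list of paths and yields one dict per directory entry
    for entry in client.ls(['/']):
        print entry['path']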

Example 1: signature

    def signature(self):
        client = Client(self._host, self._port, effective_user=self._user, use_trash=False)
        stats = client.stat([self._partial])
        if stats['file_type'] == 'f':
            return "modification_time:{}".format(stats['modification_time'])
        else:
            return stats['file_type']
Developer: lexman, Project: tuttle, Lines of code: 7, Source: hdfs.py


Example 2: EffectiveUserTest

class EffectiveUserTest(MiniClusterTestBase):
    ERR_MSG_TOUCH = "org.apache.hadoop.security.AccessControlException\nPermission denied: user=__foobar"
    ERR_MSG_STAT = "`/foobar2': No such file or directory"

    VALID_FILE = '/foobar'
    INVALID_FILE = '/foobar2'

    def setUp(self):
        self.custom_client = Client(self.cluster.host, self.cluster.port)
        self.custom_foobar_client = Client(host=self.cluster.host,
                                           port=self.cluster.port,
                                           effective_user='__foobar')

    def test_touch(self):
        print tuple(self.custom_client.touchz([self.VALID_FILE]))
        try:
            tuple(self.custom_foobar_client.touchz([self.INVALID_FILE]))
        except Exception, e:
            self.assertTrue(e.message.startswith(self.ERR_MSG_TOUCH))

        self.custom_client.stat([self.VALID_FILE])
        try:
            self.custom_client.stat([self.INVALID_FILE])
        except Exception, e:
            self.assertEquals(e.message, self.ERR_MSG_STAT)
Developer: 3rwww1, Project: snakebite, Lines of code: 25, Source: effective_user_test.py


Example 3: test

def test():
    """
    """
    client = Client("192.168.99.100", 9000)
    for f in client.ls(['/files']):
        print f
        for line in client.cat([f.get('path')]):
            for l in line:
                print l
Developer: alfonsokim, Project: scripts, Lines of code: 9, Source: test.py


Example 4: run

    def run(self):
        c = Client(self.host, self.port)

        listing = c.ls([self.log_path], recurse=True)
        events = []
        for f in listing:
            path = f['path']

            if not path.endswith('.jhist'):
                continue

            ts = arrow.get(f['modification_time']/1000)

            if ts <= self.checktime:
                continue

            job_id = job_pattern.match(path.split('/')[-1]).group(0)

            if job_id in self.jobs and self.jobs[job_id] >= ts.timestamp*1000:
                log.debug('Skipping processed job: ' + job_id)
                continue

            config_path = path[:path.rfind('/')]+'/'+job_id+'_conf.xml'

            event = {
                'inviso.type': 'mr2',
                'job.id': job_id,
                'application.id': job_id.replace('job_', 'application_'),
                'job.type': 'mr2',
                'file.type': ['history', 'config'],
                'jobflow' : self.jobflow,
                'cluster.id': self.cluster_id,
                'cluster': self.cluster_name,
                'history.uri': 'hdfs://%s:%s%s' % (self.host,self.port,path),
                'config.uri':'hdfs://%s:%s%s' % (self.host,self.port,config_path),
                'host': self.host,
                'port': self.port,
                'timestamp': str(ts),
                'epoch': f['modification_time'],
                'mapreduce.version': 'mr2'
            }

            log.info('Publishing event: (%s) %s %s' % (event['cluster'], event['job.id'], ts))
            events.append(event)
        for chunk in [events[i:i + self.chunk_size] for i in xrange(0, len(events), self.chunk_size)]:
            self.publisher.publish(chunk)
Developer: Netflix, Project: inviso, Lines of code: 46, Source: monitor.py


Example 5: delete_item

def delete_item(config, filepath='', localpath=''):

    if(config['BACKEND'] == 'hdfs'):
        client = Client(socket.gethostname(), config['HADOOP_RPC_PORT'], use_trash=False)
        del_gen = client.delete([filepath], recurse=True)
        for del_item in del_gen:
            pass
    elif(config['BACKEND'] == 'swift'):
        pass  # To be implemented

    # Deleting modules or datasets from local directories (will also suffice for nfs backend)
    if(os.path.isdir(localpath)):  # Check if it is a dataset
        shutil.rmtree(localpath)
    else:
        try:
            os.remove(localpath)
        except OSError:
            pass
Developer: CSC-IT-Center-for-Science, Project: spark-analysis, Lines of code: 18, Source: helper.py


Example 6: crfalign

def crfalign(sc, inputFilename, outputDirectory, 
            limit=LIMIT, location='hdfs', outputFormat="text", partitions=None, deleteFirst=True):

    # crfConfigDir = os.path.join(os.path.dirname(__file__), "data/config")
    # def cpath(n):
    #     return os.path.join(crfConfigDir, n)

    # smEyeColor = HybridJaccard(ref_path=cpath("eyeColor_reference_wiki.txt"),
    #                            config_path=cpath("eyeColor_config.txt"))
    # smHairColor = HybridJaccard(ref_path=cpath("hairColor_reference_wiki.txt"),
    #                             config_path=cpath("hairColor_config.txt"))
    # print smEyeColor, smHairColor

    if location == "hdfs":
        if deleteFirst:
            namenode = "memex-nn1"
            port = 8020
            client = Client(namenode, port, use_trash=True)
            try:
                for deleted in client.delete([outputDirectory], recurse=True):
                    print deleted
            except FileNotFoundException as e:
                pass

    # hypothesis1: data fetched this way prompts the lzo compression error
    # hypothesis2: but it doesn't matter, error is just a warning
    rdd_crfl = sc.textFile(inputFilename)
    rdd_crfl.setName('rdd_crfl')

    if limit:
        rdd_crfl = sc.parallelize(rdd_crfl.take(limit))
    if partitions:
        rdd_crfl = rdd_crfl.repartition(partitions)

    rdd_final = rdd_crfl
    print outputFormat
    if outputFormat == "sequence":
        rdd_final.saveAsSequenceFile(outputDirectory)
    elif outputFormat == "text":
        print "saving to %s" % outputDirectory
        rdd_final.saveAsTextFile(outputDirectory)
    else:
        raise RuntimeError("Unrecognized output format: %s" % outputFormat)
Developer: cjsanjay, Project: dig-crf, Lines of code: 43, Source: crfminimal.py


Example 7: __init__

    def __init__(self, workflow, **kwargs):
        super(HDFSTextLoader, self).__init__(workflow, **kwargs)
        self.file_name = kwargs["file"]
        self.chunk_lines_number = kwargs.get("chunk", 1000)
        client_kwargs = dict(kwargs)
        del client_kwargs["file"]
        if "chunk" in kwargs:
            del client_kwargs["chunk"]
        self.hdfs_client = Client(**client_kwargs)
        self.output = [""] * self.chunk_lines_number
        self.finished = Bool()
Developer: 2php, Project: veles, Lines of code: 11, Source: hdfs_loader.py


Example 8: getTrainedModel

def getTrainedModel(hdfsServer, modelFile):
    hdfsPort = int(os.environ.get('HDFS_NAME_PORT', 8020))
    modelSavePath = "/user/" + os.getenv('LOGNAME') + "/data/model/" + modelFile + '/'

    # Load the saved model data
    hdfs_client = Client(hdfsServer, hdfsPort)
    filesInfo = hdfs_client.ls([modelSavePath])

    # Copy HDFS files to local temp directory
    # First clean up and recreate the temp folder
    copyDir = tempfile.gettempdir() + "/" + modelFile
    shutil.rmtree(copyDir, ignore_errors=True)
    os.makedirs(copyDir)
    res = hdfs_client.copyToLocal([f['path'] for f in filesInfo], copyDir)
    for r in res:
        if not r['result']:
            print "Error: %s" % r

    modelFilePath = copyDir + '/' + modelFile
    print "Load model from  %s" % modelFilePath
    return joblib.load(modelFilePath)
Developer: patng323, Project: w205-course-project, Lines of code: 21, Source: processStream.py
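A hedged usage sketch for the helper above (the server name, model file, and test matrix are hypothetical; the function also expects the HDFS_NAME_PORT and LOGNAME environment variables to be set, per its body):

    # fetch the persisted classifier from HDFS and use it locally
    model = getTrainedModel('namenode.example.com', 'belt.model')
    predictions = model.predict(X_test)  # X_test: hypothetical feature matrix shaped like the training data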


Example 9: getObjsBackend

def getObjsBackend(objs, backend, config):

    if(backend == 'hdfs'):

        client = Client(socket.gethostname(), config['HADOOP_RPC_PORT'], use_trash=False)

        for obj in objs:
            try:
                copy_gen = client.copyToLocal([obj[0]], obj[1])
                for copy_item in copy_gen:
                    pass
            except Exception as e:
                print(e)
    elif(backend == 'swift'):

        options = {'os_auth_url': os.environ['OS_AUTH_URL'], 'os_username': os.environ['OS_USERNAME'], 'os_password': os.environ['OS_PASSWORD'], 'os_tenant_id': os.environ['OS_TENANT_ID'], 'os_tenant_name': os.environ['OS_TENANT_NAME']}
        swiftService = SwiftService(options=options)

        for obj in objs:

            # Create the containers which are used in this application for Object Storage
            if(obj[0] == 'sqlite.db'):
                swiftService.post(container='containerFiles')
                swiftService.post(container='containerFeatures')
                swiftService.post(container='containerModules')

            out_file = obj[1]  # Get the output file location from runner
            localoptions = {'out_file': out_file}
            objects = []
            objects.append(obj[0])
            swiftDownload = swiftService.download(container='containerModules', objects=objects, options=localoptions)

            for downloaded in swiftDownload:
                if("error" in downloaded.keys()):
                    raise RuntimeError(downloaded['error'])
                # print(downloaded)

    elif(backend == 'nfs'):  # Every file is already in respective local dirs
        pass
Developer: CSC-IT-Center-for-Science, Project: spark-analysis, Lines of code: 39, Source: helper.py


Example 10: HDFSStat

class HDFSStat(object):

    cluster = 'hostname'
    port = 8020
    default_path = '/user/hive/warehouse'

    @staticmethod
    def build_path(table):
        nm = table.split('.')[0]
        tb = table.split('.')[1]
        return HDFSStat.default_path + '/' + nm + '.db/' + tb

    def __init__(self):
        self.client = Client(HDFSStat.cluster, HDFSStat.port, use_trash=False)

    def latest_partition(self, table_name, table_path=None):
        t_path = HDFSStat.build_path(table_name) if table_path is None else table_path
        latest_dir = list(self.client.ls([t_path])).pop()
        return path.basename(latest_dir['path']).split('=')[1]

    def poke_partition(self, table_name, partition_name, partition, table_path=None):
        t_path = HDFSStat.build_path(table_name) if table_path is None else table_path
        partition_path = t_path + '/' + partition_name + '=' + partition
        return self.client.test(partition_path, exists=True, directory=True, zero_length=False)
Developer: imsid, Project: kickstarter, Lines of code: 24, Source: hdfsstat.py
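A hedged usage sketch for HDFSStat (table, partition column, and date are hypothetical; `cluster = 'hostname'` above would need a real namenode host before this runs):

    stat = HDFSStat()
    print stat.latest_partition('mydb.events')                    # e.g. '2016-01-07'
    print stat.poke_partition('mydb.events', 'ds', '2016-01-07')  # True if the partition dir exists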


Example 11: HDFSTextLoader

class HDFSTextLoader(Unit, TriviallyDistributable):
    def __init__(self, workflow, **kwargs):
        super(HDFSTextLoader, self).__init__(workflow, **kwargs)
        self.file_name = kwargs["file"]
        self.chunk_lines_number = kwargs.get("chunk", 1000)
        client_kwargs = dict(kwargs)
        del client_kwargs["file"]
        if "chunk" in kwargs:
            del client_kwargs["chunk"]
        self.hdfs_client = Client(**client_kwargs)
        self.output = [""] * self.chunk_lines_number
        self.finished = Bool()

    def initialize(self):
        self.debug("Opened %s", self.hdfs_client.stat([self.file_name]))
        self._generator = self.hdfs_client.text([self.file_name])

    def run(self):
        assert not self.finished
        try:
            for i in range(self.chunk_lines_number):
                self.output[i] = next(self._generator)
        except StopIteration:
            self.finished <<= True
Developer: 2php, Project: veles, Lines of code: 24, Source: hdfs_loader.py


Example 12: __init__

    def __init__(self, path, name_node, hive_server,
                 user="root", hive_db="default", password=None, nn_port=8020, hive_port=10000):

        # HDFS Connection
        self._client = Client(name_node, nn_port)

        self._db = hive_db

        # Hive Connection
        self._hive = pyhs2.connect(host=hive_server,
                                   port=hive_port,
                                   authMechanism="PLAIN",
                                   database=hive_db,
                                   user=user,
                                   password=password)
        self._path = path
Developer: grundprinzip, Project: pyxplorer, Lines of code: 16, Source: loader.py


Example 13: __init__

    def __init__(self,topic,user,server,port,web_port,base,hdfs_tmp):
        self.topic = topic
        self.username = user
        self.server = server
        self.port = port
        self.base = base
        self.path = ["%s/%s" % (base,topic)]
        self.hdfs_tmp = hdfs_tmp

        try:
            self.client = Client(server,port,effective_user=user)
            self.hdfsclient = hdfs.client.InsecureClient(
                "http://%s:%d" % (server,web_port), user=user)
            self.daylist = self.check()
        except:
            print "Base path %s does not contain valid structure" % (base)
            raise
Developer: agrebin, Project: snappymerge, Lines of code: 17, Source: snappymerge.py


Example 14: int

# (imports below reconstructed from the names used in this snippet)
import argparse
import os
import subprocess

import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.externals import joblib
from snakebite.client import Client

parser = argparse.ArgumentParser()
parser.add_argument("--hdfs", help="HDFS FS name", default = 'localhost')
parser.add_argument("--model", help="Name of model file", default = 'belt.model')
args = parser.parse_args()


hdfsServer = args.hdfs
hdfsPort = int(os.environ.get('HDFS_NAME_PORT', 8020))
hdfsHost = "hdfs://" + hdfsServer + ":" + str(hdfsPort)
modelSavePath = "/user/" + os.getenv('LOGNAME') + "/data/model/" + args.model + "/"
print "hdfs=%s, savePath=%s, hdfsHost=%s" % (hdfsServer, modelSavePath, hdfsHost)

hdfs_client = Client(hdfsServer, hdfsPort)

X_train_file = hdfs_client.text(["/user/" + os.getenv('LOGNAME') + "/data/X_train.txt"]).next()
y_train_file = hdfs_client.text(["/user/" + os.getenv('LOGNAME') + "/data/y_train.txt"]).next()

X_train = np.genfromtxt(str.splitlines(X_train_file))
y_train = np.genfromtxt(str.splitlines(y_train_file))

clf = LogisticRegression()
clf = clf.fit(X_train, y_train)

files = joblib.dump(clf, "belt.model")

subprocess.check_call(['hdfs', 'dfs', '-rm', '-r', '-f', modelSavePath], shell=False)
subprocess.check_call(['hdfs', 'dfs', '-mkdir', '-p', modelSavePath], shell=False)
Developer: patng323, Project: w205-course-project, Lines of code: 30, Source: trainModel.py


Example 15: Client

from snakebite.client import Client

client = Client('localhost', 9000)
for f in client.copyToLocal(['/input/input.txt'], '/tmp'):
    print f
Developer: CedarLogic, Project: HadoopWithPython, Lines of code: 5, Source: copy_to_local.py


Example 16: Client

from snakebite.client import Client
from constants import *

client = Client('localhost', NAMENODE_PORT)

for p in client.delete(['/foo/bar','/input'], recurse=True):
    print p
Developer: altock, Project: dev, Lines of code: 7, Source: delete.py


Example 17: Client

import os
from StringIO import StringIO

from snakebite.client import Client

# provide the namenode inter-process communication (RPC) port
INTER_PROCESS_COMMUNICATION_PORT = "..."

# provide the Name Node of Hadoop
NAME_NODE = "..."

# and get the client of HDFS
CLIENT_HDFS = Client(NAME_NODE, INTER_PROCESS_COMMUNICATION_PORT)


def read_hdfs_file(file_path_and_name):
    """Reads an hdfs file
    :param file_path_and_name: the path and the file to read
    """

    # 1. gets the hdfs file object
    for file_contents in CLIENT_HDFS.text([file_path_and_name]):
        file_unicode = file_contents.decode('unicode-escape')
        file_obj = StringIO(file_unicode)

    # 2. read and operate on top:
    file_lines = file_obj.readlines()
    for line in file_lines:
        # ...
        # do operations on the file
        pass
Developer: tsarouch, Project: recycling_python_code, Lines of code: 28, Source: hdfs_reader.py


Example 18: Client

#!/usr/bin/env python

import os
from snakebite.client import Client


client = Client("trevally.amer.nevint.com", 9000, use_trash=False, effective_user='hadoop')


#for res in client.mkdir(['/user/hadoop/test/move/file'],create_parent=True, mode=755):
#    print res

for res in client.rename(['/user/hadoop/test.tar'],'/user/hadoop/test3.tar'):
    print res

Developer: blueskywalker, Project: junkyard, Lines of code: 14, Source: move_files.py


Example 19: is_exist

def is_exist(dirPath, master = public.SPARK_MASTER, port = public.SPARK_MASTER_PORT):
    client = Client(master, port, use_trash=False)
    return client.test(dirPath, exists=True, directory=True)
Developer: yudongjin, Project: public, Lines of code: 3, Source: file_spark.py
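A hedged call sketch (assuming `public.SPARK_MASTER` and `public.SPARK_MASTER_PORT` name the namenode host and its RPC port, as the defaults suggest; the path is hypothetical):

    if is_exist('/user/hive/warehouse'):
        print "warehouse directory exists"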


Example 20: HDFS_topic

class HDFS_topic(object):
    def __init__(self,topic,user,server,port,web_port,base,hdfs_tmp):
        self.topic = topic
        self.username = user
        self.server = server
        self.port = port
        self.base = base
        self.path = ["%s/%s" % (base,topic)]
        self.hdfs_tmp = hdfs_tmp

        try:
            self.client = Client(server,port,effective_user=user)
            self.hdfsclient = hdfs.client.InsecureClient(
                "http://%s:%d" % (server,web_port), user=user)
            self.daylist = self.check()
        except:
            print "Base path %s does not contain valid structure" % (base)
            raise

    #
    # Check basic hdfs access and that directory format is appropriate
    # also builds datelist structure
    #
    def check(self):
        self.content = self.client.ls(self.path)
        ret = []
        for item in self.content:
            (head,tail) = os.path.split(item['path'])
            try:
                parse(tail,yearfirst=True,dayfirst=True)
                if item['file_type'] == 'd':
                    ret.append(tail)
                else:
                    print("WARNING: %s is not a directory, skipping\n" % (item['path']))
            except:
                print("WARNING: %s is not in date format, skipping\n" % (tail))

        if len(ret) > 0:
            ret.sort(key=lambda x: datetime.strptime(x,"%Y-%m-%d"))
            return ret
        else:
            return False

    #
    # Given a date, check if that date is on the dirlist and return matching dir entry
    #
    def day_in_topic(self, date):
        for item in self.daylist:
            if parse(date) == parse(item):
                return item
        return False

    #
    # Checks and validates date_from and date_to arguments
    #
    def check_date_range(self,date_from,date_to):
        if date_from:
            try:
                parse(date_from)
            except:
                raise ValueError("FATAL: start date (%s) invalid date format" % (date_from))

            if (parse(date_from) < parse(self.daylist[0])) or (parse(date_from) > parse(self.daylist[-1])):
                raise ValueError("FATAL: start date (%s) not in range (%s ---> %s)" % (date_from,self.daylist[0],self.daylist[-1]))
            else:
                ret_from = parse(date_from).strftime("%Y-%m-%d")
                while not self.day_in_topic(ret_from):
                    print "WARNING: start date %s not in topic %s, trying next day" % (ret_from,self.topic)
                    ret_from = datetime.strftime((parse(ret_from)+timedelta(days=1)), "%Y-%m-%d")

                ret_from = self.day_in_topic(ret_from)

        else:
            ret_from = self.daylist[0]

        if date_to:
            try:
                parse(date_to)
            except:
                raise ValueError("FATAL: end date (%s) invalid date format" % (date_to))

            if (parse(date_to) < parse(self.daylist[0])) or (parse(date_to) > parse(self.daylist[-1])):
                raise ValueError("FATAL: end date (%s) not in range (%s ---> %s)" % (date_to,self.daylist[0],self.daylist[-1]))
            else:
                ret_to = parse(date_to).strftime("%Y-%m-%d")
        else:
            ret_to = self.daylist[-1]

        if (parse(ret_from) > parse(ret_to)):
            raise ValueError("FATAL: start date (%s) must be <= end date (%s)" % (ret_from,ret_to))

        return (ret_from,ret_to)

    #
    #  Traverses the list of valid directories and merges each day
    #
#......... part of the code omitted here .........
Developer: agrebin, Project: snappymerge, Lines of code: 101, Source: snappymerge.py



Note: The snakebite.client.Client class examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers; copyright in the source code remains with the original authors, and distribution or reuse is governed by each project's license. Please do not republish without permission.

