All posts (33)

[security] jceks hadoop credential provider api

```bash
# list
hadoop credential list -provider jceks:///dir/filename.jceks

# create
hadoop credential create myalias -value 'pwd' -provider jceks:///dir/filename.jceks
hadoop credential create myalias2 -value 'pwd2' -provider jceks:///dir/filename.jceks
```

```python
def getHadoopCredentail(credProviderPath="jceks:///dir/filename.jceks", pwAlias=None):
    spark = SparkSession.builder \
        .appName(appName1) \
        .enableHiv..
```

(Preview truncated; a hedged completion sketch follows the listing.)

[python] Getting startingOffsets value and reading kafka value into spark dataframe

```python
from pyspark.sql import SparkSession
import pyspark.sql.functions as func

# get max offset
spark1 = SparkSession.builder \
    .appName(appName1) \
    .config("spark.executor.memory", "11g") \
    .enableHiveSupport() \
    .getOrCreate()

strStartOffset = 'earliest'
if len(listPath) > 0:
    dfOffset = spark1.read.parquet(path + "/" + listPath['pathSuffix'].max())
    dfOffset.createOrReplaceTempView("temp_offset")
    dfMaxOff..
```

(Preview truncated; see the Kafka read sketch after the listing.)

[python] get hdfs path info

```python
import requests
import pandas as pd

def getPathInfo(hdfsPath):
    try:
        s = requests.session()
        userName = "hive"  # getpass.getuser()
        operation = "LISTSTATUS"
        httpFsUrl = "http://hostip:14000"
        req = "{0}/webhdfs/v1{1}?user.name={2}&op={3}".format(httpFsUrl, hdfsPath, userName, operation)
        response = s.get(req, headers={'Connection': 'close'})
        result = response.json()["FileStatuses"]["FileStatus"]
        dfTmp = pd..
```

(Preview truncated; a completion sketch follows the listing.)
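For the jceks credential post, the preview stops at `.enableHiv..`. Below is a minimal sketch of how such a lookup is commonly finished, assuming the standard Hadoop `Configuration.getPassword` API reached through py4j; the app name is a placeholder for the elided `appName1`, and the original (misspelled) function name is kept for continuity:

```python
from pyspark.sql import SparkSession

def getHadoopCredentail(credProviderPath="jceks:///dir/filename.jceks", pwAlias=None):
    # Build (or reuse) a Spark session; "credDemo" stands in for the elided appName1.
    spark = SparkSession.builder \
        .appName("credDemo") \
        .enableHiveSupport() \
        .getOrCreate()

    # Point the Hadoop configuration at the jceks keystore, then resolve the alias.
    conf = spark.sparkContext._jsc.hadoopConfiguration()
    conf.set("hadoop.security.credential.provider.path", credProviderPath)

    # Configuration.getPassword returns a Java char[] (None if the alias is absent).
    chars = conf.getPassword(pwAlias)
    return None if chars is None else ''.join(str(c) for c in chars)

# usage: pwd = getHadoopCredentail(pwAlias="myalias")
```

Keeping the password in a jceks keystore means only the alias travels with the job code; the secret itself stays in the credential store created by the `hadoop credential create` commands above.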
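For the Kafka post, the snippet recovers a saved offset from parquet before the preview cuts off. A hedged sketch of the usual next step, assuming a batch read with Spark's built-in `kafka` source; the broker address, topic name, and offset JSON shape are illustrative:

```python
from pyspark.sql import SparkSession
import pyspark.sql.functions as func

spark1 = SparkSession.builder \
    .appName("kafkaOffsetDemo") \
    .config("spark.executor.memory", "11g") \
    .enableHiveSupport() \
    .getOrCreate()

# 'earliest' when no saved offset exists; otherwise a JSON string such as
# '{"mytopic":{"0":1234}}' pins the per-partition starting position.
strStartOffset = 'earliest'

df = spark1.read \
    .format("kafka") \
    .option("kafka.bootstrap.servers", "broker1:9092") \
    .option("subscribe", "mytopic") \
    .option("startingOffsets", strStartOffset) \
    .option("endingOffsets", "latest") \
    .load() \
    .select(func.col("value").cast("string").alias("value"),
            "partition", "offset")
```

Persisting the last processed offset (here, to parquet) and feeding it back as `startingOffsets` is what lets a plain batch job resume where the previous run stopped, without relying on Kafka consumer groups.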
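For the HDFS path post, the preview ends at `dfTmp = pd..`; the natural completion is a pandas DataFrame built from the WebHDFS `FileStatus` list. A sketch under that assumption; the HttpFS host and the error handling are illustrative:

```python
import requests
import pandas as pd

def getPathInfo(hdfsPath):
    try:
        s = requests.session()
        userName = "hive"
        operation = "LISTSTATUS"
        httpFsUrl = "http://hostip:14000"
        req = "{0}/webhdfs/v1{1}?user.name={2}&op={3}".format(
            httpFsUrl, hdfsPath, userName, operation)
        response = s.get(req, headers={'Connection': 'close'})
        # Each FileStatus dict (pathSuffix, type, length, modificationTime, ...)
        # becomes one row of the DataFrame.
        result = response.json()["FileStatuses"]["FileStatus"]
        dfTmp = pd.DataFrame(result)
        return dfTmp
    except Exception as e:
        print(e)
        return pd.DataFrame()

# usage: getPathInfo("/user/hive/warehouse")
```

Returning a DataFrame is also what makes the Kafka post's `listPath['pathSuffix'].max()` call work: `pathSuffix` arrives as a column, so `.max()` picks the lexicographically latest directory name.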