# -*- coding: utf-8 -*-
from pyspark.sql import SparkSession, HiveContext, DataFrameWriter
import argparse
import time
import numpy as np
import pandas as pd

spark = SparkSession.builder.enableHiveSupport().appName("test").getOrCreate()
start = time.time()

### Load method 1: read Parquet files from HDFS
input = "/aaa/bbb/ccc"
data = spark.read.parquet(input)
data.show(5)
# +-------------------+------+--------------------+
# |         START_TIME|amount|           payerCode|
# +-------------------+------+--------------------+
# |2019-06-28 21:04:37|  10.7|    692200000XXXXXXX|
# |2018-11-24 20:15:40|  19.9|    602200000XXXXXXX|
# |2019-06-19 12:33:14|   2.0|    692200000XXXXXXX|
# |2019-07-03 23:04:12|  5.27|    622200000XXXXXXX|
# |2018-11-26 21:26:30|   2.0|    622200000XXXXXXX|
# +-------------------+------+--------------------+
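### argparse is imported above but never used in this snippet. A minimal sketch
### of parameterizing the hard-coded paths (the --input/--output flag names are
### illustrative assumptions, not part of the original script):
# parser = argparse.ArgumentParser()
# parser.add_argument("--input", default="/aaa/bbb/ccc")
# parser.add_argument("--output", default="/aaa/bbb/ccc")
# args = parser.parse_args()
# data = spark.read.parquet(args.input)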
## Load method 2: read from Hive
####### Build the query SQL. This is ordinary Hive SQL, so WHERE and other
####### clauses can be added as usual. (hive_database and hive_table2 are
####### defined in the save section below.)
hive_context = HiveContext(spark.sparkContext)
hive_read = "select * from {}.{}".format(hive_database, hive_table2)
####### The result of running the SQL against Hive comes back directly as a DataFrame
read_df = hive_context.sql(hive_read)
read_df.show(5)
# +-------------------+------+--------------------+
# |         START_TIME|amount|           payerCode|
# +-------------------+------+--------------------+
# |2019-06-28 21:04:37|  10.7|    692200000XXXXXXX|
# |2018-11-24 20:15:40|  19.9|    602200000XXXXXXX|
# |2019-06-19 12:33:14|   2.0|    692200000XXXXXXX|
# |2019-07-03 23:04:12|  5.27|    622200000XXXXXXX|
# |2018-11-26 21:26:30|   2.0|    622200000XXXXXXX|
# +-------------------+------+--------------------+
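### Note: HiveContext has been deprecated since Spark 2.0. Because the
### SparkSession above was created with enableHiveSupport(), the same query can
### be run on it directly:
# read_df = spark.sql("select * from {}.{}".format(hive_database, hive_table2))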
### Load method 3: read a delimited text (CSV) file; filepath is assumed to be
### defined elsewhere
tttt = spark.read.csv(filepath, header=True, inferSchema=True, sep=',')
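### inferSchema=True costs an extra pass over the file just to guess column
### types; for large inputs an explicit schema avoids that. A minimal sketch,
### assuming the file has the same three columns as the sample shown above:
# from pyspark.sql.types import (StructType, StructField, StringType,
#                                DoubleType, TimestampType)
# schema = StructType([
#     StructField("START_TIME", TimestampType(), True),
#     StructField("amount", DoubleType(), True),
#     StructField("payerCode", StringType(), True),
# ])
# tttt = spark.read.csv(filepath, header=True, schema=schema, sep=',')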
##### Save the data (to HDFS and to Hive)
hive_database = "testt0618"
data1 = data.limit(10)
output = "/aaa/bbb/ccc"
### Save method 1: write as Parquet to HDFS (SaveMode.Overwrite is the Scala
### API; in PySpark the mode is passed as a string)
data1.write.mode("overwrite").parquet(output)
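### Large Parquet outputs are often partitioned so that later reads can prune
### files. A hedged sketch, assuming a date column derived from START_TIME is
### an acceptable partition key (the "dt" name is an illustrative assumption):
# from pyspark.sql import functions as F
# (data1.withColumn("dt", F.to_date("START_TIME"))
#       .write.mode("overwrite").partitionBy("dt").parquet(output))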
### Save method 2: save directly as a Hive table through the DataFrameWriter
hive_table1 = "ii"
data1.write.format("hive").mode("overwrite").saveAsTable('{}.{}'.format(hive_database, hive_table1))
### Save method 3: register a temp view, then CREATE TABLE ... AS SELECT
hive_table2 = "lll"
data1.createOrReplaceTempView('test_hive')  # registerTempTable() is deprecated
spark.sql("create table {}.{} as select * from test_hive".format(hive_database, hive_table2))
### Save method 4: write a single '#'-separated text file with a header row;
### coalesce(1) funnels everything into one task so exactly one file is produced
data1.coalesce(1).write.option("sep", "#").option("header", "true").csv(output + "_text", mode='overwrite')
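### start = time.time() was recorded at the top of the script but never
### reported; close the measurement with the elapsed wall-clock time:
print("elapsed: {:.1f}s".format(time.time() - start))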