ablog

Notes from a clumsy, restless engineer

Replacing newline characters inside CSV columns with Glue PySpark

An example of replacing the newline characters inside a CSV column with Glue PySpark. Note that Spark's regexp_replace takes Java regular-expression syntax, so the pattern is written as "\\n|\\r".

newDf = df.withColumn("col2", regexp_replace(col("col2"), "\\n|\\r", " "))
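
Since the pattern "\\n|\\r" matches one character at a time, a CRLF pair ("\r\n") becomes two spaces; a character class like "[\\r\\n]+" would collapse each run of newline characters into a single space instead. As a quick sanity check, the same replacement can be tried in plain PySpark (a minimal sketch with hypothetical data, outside Glue):

from pyspark.sql import SparkSession
from pyspark.sql.functions import col, regexp_replace

spark = SparkSession.builder.appName("newline-check").getOrCreate()

# Hypothetical row whose col2 contains an embedded LF and CR
df = spark.createDataFrame([("a", "line1\nline2\rline3")], ["col1", "col2"])

df.withColumn("col2", regexp_replace(col("col2"), "\\n|\\r", " ")).show(truncate=False)
# col2 -> "line1 line2 line3"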
  • Full sample code
import sys
from awsglue.transforms import *
from awsglue.utils import getResolvedOptions
from pyspark.context import SparkContext
from awsglue.context import GlueContext
from awsglue.job import Job

from pyspark.sql.functions import col, regexp_replace
from awsglue.dynamicframe import DynamicFrame


## @params: [JOB_NAME]
args = getResolvedOptions(sys.argv, ['JOB_NAME'])

sc = SparkContext()
glueContext = GlueContext(sc)
spark = glueContext.spark_session
job = Job(glueContext)
job.init(args['JOB_NAME'], args)
## @type: DataSource
## @args: [database = "default", table_name = "newline_test", transformation_ctx = "datasource0"]
## @return: datasource0
## @inputs: []
datasource0 = glueContext.create_dynamic_frame.from_catalog(database = "default", table_name = "newline_test", transformation_ctx = "datasource0")
## @type: ApplyMapping
## @args: [mapping = [("col0", "string", "col0", "string"), ("col1", "string", "col1", "string"), ("col2", "string", "col2", "string"), ("col3", "string", "col3", "string")], transformation_ctx = "applymapping1"]
## @return: applymapping1
## @inputs: [frame = datasource0]
applymapping1 = ApplyMapping.apply(frame = datasource0, mappings = [("col0", "string", "col0", "string"), ("col1", "string", "col1", "string"), ("col2", "string", "col2", "string"), ("col3", "string", "col3", "string")], transformation_ctx = "applymapping1")

# regexp_replace is a Spark SQL function, so convert the DynamicFrame to a
# DataFrame, replace the newline characters (\n, \r) in the col2 column with
# spaces, and convert the result back to a DynamicFrame for the sink
df = applymapping1.toDF()
newDf = df.withColumn("col2", regexp_replace(col("col2"), "\\n|\\r", " "))
result = DynamicFrame.fromDF(newDf, glueContext, "result")

## @type: DataSink
## @args: [connection_type = "s3", connection_options = {"path": "s3://dl-sfdc-dm/test/newline_test"}, format = "csv", transformation_ctx = "datasink2"]
## @return: datasink2
## @inputs: [frame = result]
datasink2 = glueContext.write_dynamic_frame.from_options(frame = result, connection_type = "s3", connection_options = {"path": "s3://dl-sfdc-dm/test/newline_test"}, format = "csv", transformation_ctx = "datasink2")
job.commit()
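
On the input side, a CSV column can only carry newlines if the field is quoted, and Spark's CSV reader splits records on physical lines by default. If the file were read directly from S3 with Spark instead of via the Glue Data Catalog, the multiLine option would be needed (a sketch assuming a hypothetical path and standard double-quoting):

# Hypothetical direct read of a CSV whose quoted fields contain newlines
raw = spark.read.csv(
    "s3://example-bucket/newline_test/",  # hypothetical path
    header=True,
    quote='"',
    escape='"',
    multiLine=True,  # let a record span multiple physical lines
)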