Convert a DynamoDB table to CSV and save it to S3 using AWS Glue Jobs
I am trying to convert a DynamoDB table to CSV and save it to S3 using an AWS Glue job. I tried the following script:
import sys
from awsglue.transforms import *
from awsglue.utils import getResolvedOptions
from pyspark.context import SparkContext
from awsglue.context import GlueContext
from awsglue.job import Job
## @params: [JOB_NAME]
args = getResolvedOptions(sys.argv, ['JOB_NAME'])
sc = SparkContext()
glueContext = GlueContext(sc)
spark = glueContext.spark_session
job = Job(glueContext)
job.init(args['JOB_NAME'], args)
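# Read the source table from the Glue Data Catalog into a DynamicFrame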
datasource0 = glueContext.create_dynamic_frame.from_catalog(database = "db", table_name = "table", transformation_ctx = "datasource0")
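# Map the nested DynamoDB attributes; each entry is (source path, source type, target path, target type)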
applymapping1 = ApplyMapping.apply(
    frame = datasource0,
    mappings = [
        ("weatherdata.m.rain.s", "string", "weatherdata.m.rain.s", "string"),
        ("weatherdata.m.temperature.s", "string", "weatherdata.m.temperature.s", "string"),
        ("weatherdata.m.humidity.s", "string", "weatherdata.m.humidity.s", "string"),
        ("weatherdata.m.id.s", "string", "weatherdata.m.id.s", "string"),
        ("weatherdata.m.timestamp.s", "string", "weatherdata.m.timestamp.s", "string"),
    ],
    transformation_ctx = "applymapping1",
)
##selectfields2 = SelectFields.apply(frame = applymapping1, paths = ["weatherdata", "id", "timestamp"], transformation_ctx = "selectfields2")
##resolvechoice3 = ResolveChoice.apply(frame = selectfields2, choice = "MATCH_CATALOG", database = "weatherapp", table_name = "d7fa0111_81dc_4997_b18a_960a14784b14", transformation_ctx = "resolvechoice3")
##datasink4 = glueContext.write_dynamic_frame.from_catalog(frame = resolvechoice3, database = "weatherapp", table_name = "d7fa0111_81dc_4997_b18a_960a14784b14", transformation_ctx = "datasink4")
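# Write the mapped frame to S3 in CSV format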
datasink2 = glueContext.write_dynamic_frame.from_options(frame = applymapping1, connection_type = "s3", connection_options = {"path": "s3://outputPath/output.csv"}, format = "csv", transformation_ctx = "datasink2")
job.commit()
I got no output.
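In case it helps, here is a variant I have been considering (a minimal sketch, not tested): as far as I understand, the CSV writer cannot serialize nested struct columns, and Glue treats the sink path as a directory prefix rather than a single file name, so this version flattens the weatherdata fields to illustrative top-level column names (id, timestamp, temperature, humidity, rain) and writes under a prefix:

# Flatten the nested fields to top-level columns so CSV has something to write
applymapping1 = ApplyMapping.apply(
    frame = datasource0,
    mappings = [
        ("weatherdata.m.id.s", "string", "id", "string"),
        ("weatherdata.m.timestamp.s", "string", "timestamp", "string"),
        ("weatherdata.m.temperature.s", "string", "temperature", "string"),
        ("weatherdata.m.humidity.s", "string", "humidity", "string"),
        ("weatherdata.m.rain.s", "string", "rain", "string"),
    ],
    transformation_ctx = "applymapping1",
)
# outputPath is the same placeholder bucket as above; Glue writes part files under this prefix
datasink2 = glueContext.write_dynamic_frame.from_options(
    frame = applymapping1,
    connection_type = "s3",
    connection_options = {"path": "s3://outputPath/"},
    format = "csv",
    transformation_ctx = "datasink2",
)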
Thanks.