# Databricks notebook source
# Version history:
#   Sept 2022, Stephanie Hong: initial version
#   May 2023, Yvette Chen: parameterize input file path, database name, and vocabulary database name
##dbutils.fs.ls('/mnt/crisp-covid/sftp/JHU_Full_Load_09_20_2021_Parquet/')
dbutils.fs.ls('/mnt/crisp-covid/sftp/')
dbutils.widgets.removeAll()
dbutils.widgets.text("parquetFilePath", "/mnt/crisp-covid/sftp/JHU_Full_Load_09_08_2022_Parquet/", "CRISP parquet file path")
dbutils.widgets.text("databaseName", "crisp_08sept2022", "CRISP database name")
dbutils.widgets.text("omopVocabDatabaseName", "omop_vocab", "OMOP vocabulary database name")
dbutils.widgets.text("previousDatabaseName", "crisp_sep2022", "Previous database name")
path = dbutils.widgets.get("parquetFilePath")
databaseName = dbutils.widgets.get("databaseName")
omopVocabDatabaseName = dbutils.widgets.get("omopVocabDatabaseName")
previousDatabaseName = dbutils.widgets.get("previousDatabaseName")
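# COMMAND ----------
# Sketch of an optional guard (not in the original notebook): fail fast if the
# parquet input path widget does not resolve to a readable mount. dbutils.fs.ls
# raises on a missing path; this just converts that into a clearer error message.
try:
    dbutils.fs.ls(path)
except Exception as e:
    raise ValueError(f"parquetFilePath does not resolve to a readable path: {path}") from e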
# COMMAND ----------
###tblproperties(delta.autoOptimize.optimizeWrite=true)
spark.sql("set spark.databricks.delta.autoCompact.enabled = true")
spark.sql("set spark.databricks.delta.autoOptimize.optimizeWrite=true")
spark.sql("SET spark.databricks.delta.formatCheck.enabled=false")
spark.sql("SET spark.databricks.delta.overwriteSchema=true")
spark.sql("set spark.databricks.delta.commitValidation.enabled=false")
tablename = "care_site"
# Use a distinct variable for the warehouse path so it does not shadow the parquet input path above.
warehousePath = "crisp-covid/warehouse/" + databaseName
location = f"/mnt/{warehousePath}/{tablename}"
spark.sql(f"""CREATE OR REPLACE TABLE {databaseName}.{tablename}(
care_site_id int,
care_site_name string,
place_of_service_concept_id int,
location_id int,
care_site_source_value string,
place_of_service_source_value string
)
using delta
location "/mnt/{path}/{tablename}"
""")
# COMMAND ----------
dfcaresite = spark.sql(f"""
select
care_site_id,
care_site_name,
place_of_service_concept_id,
location_id,
care_site_source_value,
place_of_service_source_value
from {previousDatabaseName}.care_site
""")
# COMMAND ----------
### write and persist the content
write_format = 'delta'
##partition_by = 'gender_concept_id'  # partitioning was considered for performance but is not applied here
# When the database and table names are passed in as parameters, select the
# columns explicitly (selectExpr("*")) before writing with saveAsTable.
dfcaresite.selectExpr("*").write.format(write_format).option("mergeSchema", "true").mode("overwrite").saveAsTable(f"{databaseName}.{tablename}")
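# COMMAND ----------
# Illustrative post-load check (an assumption, not part of the original load:
# since this notebook copies care_site forward unchanged, the new table should
# match the previous one row for row).
new_count = spark.table(f"{databaseName}.{tablename}").count()
old_count = spark.table(f"{previousDatabaseName}.{tablename}").count()
assert new_count == old_count, f"care_site row count mismatch: {new_count} != {old_count}"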
# COMMAND ----------