feat: support read_csv for backends with no native support #9908

Open
wants to merge 19 commits into base: main
Changes from 3 commits
98 changes: 98 additions & 0 deletions ibis/backends/__init__.py
@@ -3,6 +3,7 @@
import abc
import collections.abc
import functools
import glob
import importlib.metadata
import keyword
import re
@@ -1236,6 +1237,103 @@ def has_operation(cls, operation: type[ops.Value]) -> bool:
f"{cls.name} backend has not implemented `has_operation` API"
)

def read_csv(
self, path: str | Path, table_name: str | None = None, **kwargs: Any
) -> ir.Table:
"""Register a CSV file as a table in the current backend.

Parameters
----------
path
The data source. A string or Path to the CSV file.
table_name
An optional name to use for the created table. This defaults to
a sequentially generated name.
**kwargs
Additional keyword arguments passed to the backend loading function.

Returns
-------
ir.Table
The just-registered table

Examples
--------
Connect to a SQLite database:

>>> con = ibis.sqlite.connect()

Read a single csv file:

>>> table = con.read_csv("path/to/file.csv")

Comment on lines +1308 to +1311
Member

Suggested change:

Read a single csv file:

>>> table = con.read_csv("path/to/file.csv")

Read a single csv file, skipping the first row, with a custom delimiter:

>>> table = con.read_csv("path/to/file.csv", skip_rows=1, delimiter=";")

Read a single csv file, but only load the specified columns:

>>> table = con.read_csv("path/to/file.csv", include_columns=["species", "island"])

Read all csv files in a directory:

>>> table = con.read_csv("path/to/csv_directory/*")

Read all csv files with a glob pattern:

>>> table = con.read_csv("path/to/csv_directory/test_*.csv")

Read csv file from s3:

>>> table = con.read_csv("s3://bucket/path/to/file.csv")

"""
pa = self._import_pyarrow()
import pyarrow.csv as pcsv
from pyarrow import fs

read_options_args = {}
parse_options_args = {}
convert_options_args = {}
memory_pool = None

for key, value in kwargs.items():
if hasattr(pcsv.ReadOptions, key):
read_options_args[key] = value
elif hasattr(pcsv.ParseOptions, key):
parse_options_args[key] = value
elif hasattr(pcsv.ConvertOptions, key):
convert_options_args[key] = value
elif key == "memory_pool":
memory_pool = value
else:
raise ValueError(f"Invalid args: {key!r}")

read_options = pcsv.ReadOptions(**read_options_args)
parse_options = pcsv.ParseOptions(**parse_options_args)
convert_options = pcsv.ConvertOptions(**convert_options_args)
if memory_pool is None:
memory_pool = pa.default_memory_pool()

path = str(path)
file_system, path = fs.FileSystem.from_uri(path)

if isinstance(file_system, fs.LocalFileSystem):
paths = glob.glob(path)
if not paths:
raise FileNotFoundError(f"No files found at {path!r}")
else:
paths = [path]

pyarrow_tables = []
for path in paths:
with file_system.open_input_file(path) as f:
pyarrow_table = pcsv.read_csv(
f,
read_options=read_options,
parse_options=parse_options,
convert_options=convert_options,
memory_pool=memory_pool,
)
pyarrow_tables.append(pyarrow_table)

pyarrow_table = pa.concat_tables(pyarrow_tables)
table_name = table_name or util.gen_name("read_csv")
self.create_table(table_name, pyarrow_table)
Member
Hm, I think this should probably be a temp table or a memtable, because none of our other read_* functions create a persistent object

Member
memtable is probably a good option
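
To make that concrete, here is a minimal hypothetical sketch of the memtable route (`ibis.memtable` accepts a `pyarrow.Table` directly, so nothing is written to the backend; the helper name is illustrative, not part of this PR):

```python
# Hypothetical sketch of the suggestion above: wrap the concatenated
# pyarrow.Table in an in-memory expression instead of calling create_table.
import ibis

def _as_memtable(pyarrow_table, table_name):
    # ibis.memtable creates an in-memory Ibis table; no persistent object
    # is registered with the backend.
    return ibis.memtable(pyarrow_table, name=table_name)
```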

return self.table(table_name)

def _cached(self, expr: ir.Table):
"""Cache the provided expression.

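For reference, a hedged usage sketch of the fallback added in this file; the connection and file path are illustrative, and the keyword arguments are the ones dispatched by the hasattr checks above (skip_rows to pyarrow.csv.ReadOptions, delimiter to pyarrow.csv.ParseOptions):

```python
import ibis

# SQLite has no native CSV reader, so the pyarrow-based fallback is exercised.
con = ibis.sqlite.connect()

# "penguins.csv" is a placeholder path; skip_rows and delimiter are forwarded
# to pyarrow.csv.ReadOptions and pyarrow.csv.ParseOptions respectively.
t = con.read_csv("penguins.csv", table_name="penguins", skip_rows=0, delimiter=",")
print(t.count().execute())
```
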
49 changes: 31 additions & 18 deletions ibis/backends/tests/test_register.py
@@ -12,14 +12,13 @@

import ibis
from ibis.backends.conftest import TEST_TABLES
from ibis.backends.tests.errors import MySQLOperationalError, PyODBCProgrammingError

if TYPE_CHECKING:
from collections.abc import Iterator

import pyarrow as pa

pytestmark = pytest.mark.notimpl(["druid", "exasol", "oracle"])


@contextlib.contextmanager
def pushd(new_dir):
@@ -98,6 +97,7 @@ def gzip_csv(data_dir, tmp_path):
"trino",
]
)
@pytest.mark.notimpl(["druid", "exasol", "oracle"])
def test_register_csv(con, data_dir, fname, in_table_name, out_table_name):
with pushd(data_dir / "csv"):
with pytest.warns(FutureWarning, match="v9.1"):
@@ -109,7 +109,7 @@ def test_register_csv(con, data_dir, fname, in_table_name, out_table_name):


# TODO: rewrite or delete test when register api is removed
@pytest.mark.notimpl(["datafusion"])
@pytest.mark.notimpl(["datafusion", "druid", "exasol", "oracle"])
@pytest.mark.notyet(
[
"bigquery",
@@ -153,6 +153,7 @@ def test_register_csv_gz(con, data_dir, gzip_csv):
"trino",
]
)
@pytest.mark.notimpl(["druid", "exasol", "oracle"])
def test_register_with_dotted_name(con, data_dir, tmp_path):
basename = "foo.bar.baz/diamonds.csv"
f = tmp_path.joinpath(basename)
@@ -212,6 +213,7 @@ def read_table(path: Path) -> Iterator[tuple[str, pa.Table]]:
"trino",
]
)
@pytest.mark.notimpl(["druid", "exasol", "oracle"])
def test_register_parquet(
con, tmp_path, data_dir, fname, in_table_name, out_table_name
):
@@ -252,6 +254,7 @@ def test_register_parquet(
"trino",
]
)
@pytest.mark.notimpl(["druid", "exasol", "oracle"])
def test_register_iterator_parquet(
con,
tmp_path,
@@ -280,7 +283,7 @@ def test_register_iterator_parquet(
# TODO: remove entirely when `register` is removed
# This same functionality is implemented across all backends
# via `create_table` and tested in `test_client.py`
@pytest.mark.notimpl(["datafusion"])
@pytest.mark.notimpl(["datafusion", "druid", "exasol", "oracle"])
@pytest.mark.notyet(
[
"bigquery",
@@ -316,7 +319,7 @@ def test_register_pandas(con):
# TODO: remove entirely when `register` is removed
# This same functionality is implemented across all backends
# via `create_table` and tested in `test_client.py`
@pytest.mark.notimpl(["datafusion", "polars"])
@pytest.mark.notimpl(["datafusion", "polars", "druid", "exasol", "oracle"])
@pytest.mark.notyet(
[
"bigquery",
@@ -361,6 +364,7 @@ def test_register_pyarrow_tables(con):
"trino",
]
)
@pytest.mark.notimpl(["druid", "exasol", "oracle"])
def test_csv_reregister_schema(con, tmp_path):
foo = tmp_path.joinpath("foo.csv")
with foo.open("w", newline="") as csvfile:
@@ -390,10 +394,12 @@ def test_csv_reregister_schema(con, tmp_path):
"clickhouse",
"dask",
"datafusion",
"flink",
"druid",
"exasol" "flink",
jitingxu1 marked this conversation as resolved.
Show resolved Hide resolved
"impala",
"mysql",
"mssql",
"oracle",
"pandas",
"polars",
"postgres",
@@ -428,6 +434,7 @@ def test_register_garbage(con, monkeypatch):
@pytest.mark.notyet(
["flink", "impala", "mssql", "mysql", "postgres", "risingwave", "sqlite", "trino"]
)
@pytest.mark.notimpl(["druid", "exasol", "oracle"])
def test_read_parquet(con, tmp_path, data_dir, fname, in_table_name):
pq = pytest.importorskip("pyarrow.parquet")

@@ -469,6 +476,7 @@ def ft_data(data_dir):
"trino",
]
)
@pytest.mark.notimpl(["druid", "exasol", "oracle"])
def test_read_parquet_glob(con, tmp_path, ft_data):
pq = pytest.importorskip("pyarrow.parquet")

@@ -488,16 +496,12 @@ def test_read_parquet_glob(con, tmp_path, ft_data):
@pytest.mark.notyet(
[
"flink",
"impala",
"mssql",
"mysql",
"pandas",
"postgres",
"risingwave",
"sqlite",
"trino",
]
)
@pytest.mark.notimpl(["druid"])
@pytest.mark.notimpl(["mssql"], raises=PyODBCProgrammingError)
@pytest.mark.notimpl(["mysql"], raises=MySQLOperationalError)
def test_read_csv_glob(con, tmp_path, ft_data):
pc = pytest.importorskip("pyarrow.csv")

@@ -534,6 +538,7 @@ def test_read_csv_glob(con, tmp_path, ft_data):
raises=ValueError,
reason="read_json() missing required argument: 'schema'",
)
@pytest.mark.notimpl(["druid", "exasol", "oracle"])
def test_read_json_glob(con, tmp_path, ft_data):
nrows = len(ft_data)
ntables = 2
@@ -577,14 +582,22 @@ def num_diamonds(data_dir):
"in_table_name",
[param(None, id="default"), param("fancy_stones", id="file_name")],
)
@pytest.mark.notyet(
["flink", "impala", "mssql", "mysql", "postgres", "risingwave", "sqlite", "trino"]
)
@pytest.mark.notyet(["flink"])
@pytest.mark.notimpl(["druid", "exasol", "oracle"])
def test_read_csv(con, data_dir, in_table_name, num_diamonds):
fname = "diamonds.csv"
with pushd(data_dir / "csv"):
if con.name == "pyspark":
# pyspark doesn't respect CWD
if con.name in (
"pyspark",
"sqlite",
"mysql",
"postgres",
"risingwave",
"impala",
"mssql",
"trino",
):
# backend doesn't respect CWD
fname = str(Path(fname).absolute())
table = con.read_csv(fname, table_name=in_table_name)
