Set no_implicit_reexport = true in pyproject.toml #4211

Merged (4 commits) on Aug 3, 2022
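For context on what the flag does: with mypy's no_implicit_reexport enabled, a name imported into a module is no longer treated as part of that module's public interface unless it is listed in __all__ or re-imported with an explicit alias (the "import x as x" form). A minimal sketch of the rule, using hypothetical module and function names rather than anything from this PR:

# mylib/__init__.py (hypothetical package, for illustration only)
from mylib.core import _helper                    # not re-exported once the flag is on
from mylib.core import read_thing as read_thing   # explicit alias form: re-exported
from mylib.core import scan_thing                 # re-exported because it is listed in __all__

__all__ = ["scan_thing"]

# downstream.py
from mylib import read_thing   # accepted by mypy
from mylib import scan_thing   # accepted by mypy
from mylib import _helper      # rejected: mylib does not explicitly export this attribute

This is the behaviour the diffs below accommodate by adding __all__ entries and importing a few names directly from their defining modules.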
10 changes: 9 additions & 1 deletion py-polars/polars/__init__.py
@@ -11,7 +11,7 @@ def version() -> str:
warnings.warn("polars binary missing!")

import polars.testing as testing
from polars.cfg import Config, toggle_string_cache # We do not export in __all__
from polars.cfg import Config
from polars.convert import (
from_arrow,
from_dict,
@@ -122,6 +122,7 @@ def version() -> str:
scan_ipc,
scan_parquet,
)
from polars.string_cache import toggle_string_cache # We do not export in __all__
from polars.string_cache import StringCache
from polars.utils import threadpool_size

@@ -133,6 +134,8 @@ def version() -> str:
"ArrowError",
"ComputeError",
"NoDataError",
"DuplicateError",
"PanicException",
"DataFrame",
"Series",
"LazyFrame",
@@ -154,6 +157,7 @@ def version() -> str:
"Date",
"Datetime",
"Time",
"Duration",
"Object",
"Categorical",
"Field",
@@ -186,6 +190,8 @@ def version() -> str:
"date_range",
"get_dummies",
"repeat",
"element",
"cut",
# polars.internal.lazy_functions
"col",
"count",
@@ -226,6 +232,8 @@ def version() -> str:
"list", # named to_list, see import above
"select",
"var",
"struct",
"duration",
# polars.convert
"from_dict",
"from_dicts",
33 changes: 32 additions & 1 deletion py-polars/polars/internals/__init__.py
@@ -12,7 +12,7 @@
from .datatypes import IntoExpr
from .expr import Expr, expr_to_lit_or_expr, selection_to_pyexpr_list, wrap_expr
from .frame import DataFrame, wrap_df
from .functions import concat, date_range # DataFrame.describe() & DataFrame.upsample()
from .functions import concat, date_range
from .io import _is_local_file, _prepare_file_arg, read_ipc_schema, read_parquet_schema
from .lazy_frame import LazyFrame, wrap_ldf
from .lazy_functions import (
@@ -28,3 +28,34 @@
)
from .series import Series, wrap_s
from .whenthen import when # used in expr.clip()

__all__ = [
Review comment from stinodego (Contributor), Aug 2, 2022:

Why not include argsort_by, date_range, and _deser_and_exec in this list? These are now imported but not available through __all__.

Same for the other __init__ file, there are a few instances missing from __all__, like Null.

Author (Contributor) replied:

Only because mypy didn't complain about them, meaning they are not imported from anywhere else in the codebase. It might be better to remove them as imports from the __init__ than to add them to __all__. Thoughts?

stinodego (Contributor) replied:

You're right, let's keep that for a different PR!

"DataFrame",
"Expr",
"IntoExpr",
"LazyFrame",
"Series",
"all",
"arg_where",
"col",
"concat",
"concat_list",
"element",
"expr_to_lit_or_expr",
"format",
"lit",
"read_ipc_schema",
"read_parquet_schema",
"select",
"selection_to_pyexpr_list",
"when",
"wrap_df",
"wrap_expr",
"wrap_ldf",
"wrap_s",
"_is_local_file",
"_prepare_file_arg",
"_scan_ds",
"_scan_ipc_fsspec",
"_scan_parquet_fsspec",
]
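To make the review discussion above concrete: once the flag is on, only the names in this __all__ stay reachable through polars.internals from other modules in the codebase. A rough sketch of the effect (illustrative only; per the thread, the names missing from __all__ currently have no such call sites):

import polars.internals as pli

ok = pli.wrap_df        # "wrap_df" is listed in __all__ above, so mypy accepts this access
bad = pli.date_range    # imported at the top of this file but absent from __all__, so any
                        # use from another module would now be rejected by mypy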
2 changes: 1 addition & 1 deletion py-polars/polars/internals/frame.py
@@ -24,6 +24,7 @@
from polars._html import NotebookFormatter
from polars.datatypes import (
Boolean,
ColumnsType,
DataType,
Int8,
Int16,
@@ -38,7 +39,6 @@
py_type_to_dtype,
)
from polars.internals.construction import (
ColumnsType,
arrow_to_pydf,
dict_to_pydf,
numpy_to_pydf,
2 changes: 1 addition & 1 deletion py-polars/pyproject.toml
@@ -38,7 +38,7 @@ strict_concatenate = true
disallow_untyped_calls = true
warn_redundant_casts = true
# warn_return_any = true
# no_implicit_reexport = true
no_implicit_reexport = true
# strict_equality = true
# TODO: When all flags are enabled, replace by strict = true
enable_error_code = [
2 changes: 1 addition & 1 deletion py-polars/tests/io/test_avro.py
@@ -1,11 +1,11 @@
from __future__ import annotations

import io
import os

import pytest

import polars as pl
from polars import io


@pytest.fixture
10 changes: 9 additions & 1 deletion py-polars/tests/test_lazy.py
@@ -1,5 +1,7 @@
from __future__ import annotations

from typing import Any, cast

import numpy as np
import pytest
from _pytest.capture import CaptureFixture
@@ -879,7 +881,13 @@ def test_arithmetic() -> None:

def test_ufunc() -> None:
df = pl.DataFrame({"a": [1, 2]})
out = df.select(np.log(col("a"))) # type: ignore[call-overload]
# NOTE: unfortunately we must use cast instead of a type: ignore comment
# 1. CI job with Python 3.10, numpy==1.23.1 -> mypy complains about arg-type
# 2. so we try to resolve it with type: ignore[arg-type]
# 3. CI job with Python 3.7, numpy==1.21.6 -> mypy complains about
# unused type: ignore comment
# for more information, see: https://github.com/python/mypy/issues/8823
out = df.select(np.log(cast(Any, col("a"))))
assert out["a"][1] == 0.6931471805599453


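The workaround described in the comment above generalizes to any call where two supported numpy versions disagree on whether a type: ignore is needed: casting the argument to Any satisfies mypy under both sets of stubs, while an ignore comment is required by one and flagged as unused by the other. A self-contained sketch mirroring the test (values chosen so the assertion holds):

from typing import Any, cast

import numpy as np
import polars as pl

df = pl.DataFrame({"a": [1, 2]})
# cast(Any, ...) silences the overload mismatch regardless of the installed numpy version
out = df.select(np.log(cast(Any, pl.col("a"))))
assert out["a"][1] == 0.6931471805599453  # log(2)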
43 changes: 20 additions & 23 deletions py-polars/tests/test_series.py
@@ -2,7 +2,7 @@

import math
from datetime import date, datetime
from typing import Any
from typing import Any, cast
from unittest.mock import patch

import numpy as np
@@ -365,80 +365,80 @@ def test_ufunc() -> None:
# test if output dtype is calculated correctly.
s_float32 = pl.Series("a", [1.0, 2.0, 3.0, 4.0], dtype=pl.Float32)
assert_series_equal(
np.multiply(s_float32, 4), # type: ignore[arg-type]
cast(pl.Series, np.multiply(s_float32, 4)),
pl.Series("a", [4.0, 8.0, 12.0, 16.0], dtype=pl.Float32),
)

s_float64 = pl.Series("a", [1.0, 2.0, 3.0, 4.0], dtype=pl.Float64)
assert_series_equal(
np.multiply(s_float64, 4), # type: ignore[arg-type]
cast(pl.Series, np.multiply(s_float64, 4)),
pl.Series("a", [4.0, 8.0, 12.0, 16.0], dtype=pl.Float64),
)

s_uint8 = pl.Series("a", [1, 2, 3, 4], dtype=pl.UInt8)
assert_series_equal(
np.power(s_uint8, 2), # type: ignore[arg-type]
cast(pl.Series, np.power(s_uint8, 2)),
pl.Series("a", [1, 4, 9, 16], dtype=pl.UInt8),
)
assert_series_equal(
np.power(s_uint8, 2.0), # type: ignore[arg-type]
cast(pl.Series, np.power(s_uint8, 2.0)),
pl.Series("a", [1.0, 4.0, 9.0, 16.0], dtype=pl.Float64),
)

s_int8 = pl.Series("a", [1, -2, 3, -4], dtype=pl.Int8)
assert_series_equal(
np.power(s_int8, 2), # type: ignore[arg-type]
cast(pl.Series, np.power(s_int8, 2)),
pl.Series("a", [1, 4, 9, 16], dtype=pl.Int8),
)
assert_series_equal(
np.power(s_int8, 2.0), # type: ignore[arg-type]
cast(pl.Series, np.power(s_int8, 2.0)),
pl.Series("a", [1.0, 4.0, 9.0, 16.0], dtype=pl.Float64),
)

s_uint32 = pl.Series("a", [1, 2, 3, 4], dtype=pl.UInt32)
assert_series_equal(
np.power(s_uint32, 2), # type: ignore[arg-type]
cast(pl.Series, np.power(s_uint32, 2)),
pl.Series("a", [1, 4, 9, 16], dtype=pl.UInt32),
)
assert_series_equal(
np.power(s_uint32, 2.0), # type: ignore[arg-type]
cast(pl.Series, np.power(s_uint32, 2.0)),
pl.Series("a", [1.0, 4.0, 9.0, 16.0], dtype=pl.Float64),
)

s_int32 = pl.Series("a", [1, -2, 3, -4], dtype=pl.Int32)
assert_series_equal(
np.power(s_int32, 2), # type: ignore[arg-type]
cast(pl.Series, np.power(s_int32, 2)),
pl.Series("a", [1, 4, 9, 16], dtype=pl.Int32),
)
assert_series_equal(
np.power(s_int32, 2.0), # type: ignore[arg-type]
cast(pl.Series, np.power(s_int32, 2.0)),
pl.Series("a", [1.0, 4.0, 9.0, 16.0], dtype=pl.Float64),
)

s_uint64 = pl.Series("a", [1, 2, 3, 4], dtype=pl.UInt64)
assert_series_equal(
np.power(s_uint64, 2), # type: ignore[arg-type]
cast(pl.Series, np.power(s_uint64, 2)),
pl.Series("a", [1, 4, 9, 16], dtype=pl.UInt64),
)
assert_series_equal(
np.power(s_uint64, 2.0), # type: ignore[arg-type]
cast(pl.Series, np.power(s_uint64, 2.0)),
pl.Series("a", [1.0, 4.0, 9.0, 16.0], dtype=pl.Float64),
)

s_int64 = pl.Series("a", [1, -2, 3, -4], dtype=pl.Int64)
assert_series_equal(
np.power(s_int64, 2), # type: ignore[arg-type]
cast(pl.Series, np.power(s_int64, 2)),
pl.Series("a", [1, 4, 9, 16], dtype=pl.Int64),
)
assert_series_equal(
np.power(s_int64, 2.0), # type: ignore[arg-type]
cast(pl.Series, np.power(s_int64, 2.0)),
pl.Series("a", [1.0, 4.0, 9.0, 16.0], dtype=pl.Float64),
)

# test if null bitmask is preserved
a1 = pl.Series("a", [1.0, None, 3.0])
b1 = np.exp(a1)
assert b1.null_count() == 1 # type: ignore[attr-defined]
b1 = cast(pl.Series, np.exp(a1))
assert b1.null_count() == 1

# test if it works with chunked series.
a2 = pl.Series("a", [1.0, None, 3.0])
@@ -447,7 +447,7 @@ def test_ufunc() -> None:
assert a2.n_chunks() == 2
c2 = np.multiply(a2, 3)
assert_series_equal(
c2, # type: ignore[arg-type]
cast(pl.Series, c2),
pl.Series("a", [3.0, None, 9.0, 12.0, 15.0, None]),
)

@@ -1150,15 +1150,12 @@ def test_abs() -> None:
# ints
s = pl.Series([1, -2, 3, -4])
assert_series_equal(s.abs(), pl.Series([1, 2, 3, 4]))
assert_series_equal(np.abs(s), pl.Series([1, 2, 3, 4])) # type: ignore[arg-type]
assert_series_equal(cast(pl.Series, np.abs(s)), pl.Series([1, 2, 3, 4]))

# floats
s = pl.Series([1.0, -2.0, 3, -4.0])
assert_series_equal(s.abs(), pl.Series([1.0, 2.0, 3.0, 4.0]))
assert_series_equal(
np.abs(s), # type: ignore[arg-type]
pl.Series([1.0, 2.0, 3.0, 4.0]),
)
assert_series_equal(cast(pl.Series, np.abs(s)), pl.Series([1.0, 2.0, 3.0, 4.0]))
assert_series_equal(
pl.select(pl.lit(s).abs()).to_series(), pl.Series([1.0, 2.0, 3.0, 4.0])
)