Merged
99 changes: 50 additions & 49 deletions pyproject.toml
@@ -22,17 +22,17 @@ classifiers = [
]
dynamic = ["version"]
dependencies = [
"protobuf>=5",
"cel-python==0.2.*",
# We need at least this version, which started publishing wheels for Python 3.14.
# Ref: https://github.com/google/re2/issues/580
"google-re2>=1.1.20251105; python_version == '3.14'",
# We need at least this version, which started publishing wheels for Python 3.13.
# Ref: https://github.com/google/re2/issues/516
"google-re2>=1.1.20250722; python_version == '3.13'",
# 1.1 started supporting 3.12.
"google-re2>=1.1; python_version == '3.12'",
"google-re2>=1",
"protobuf>=5",
"cel-python==0.2.*",
# We need at least this version, which started publishing wheels for Python 3.14.
# Ref: https://github.com/google/re2/issues/580
"google-re2>=1.1.20251105; python_version == '3.14'",
# We need at least this version, which started publishing wheels for Python 3.13.
# Ref: https://github.com/google/re2/issues/516
"google-re2>=1.1.20250722; python_version == '3.13'",
# 1.1 started supporting 3.12.
"google-re2>=1.1; python_version == '3.12'",
"google-re2>=1",
Comment on lines -25 to +35
Member Author:
We had some two-space, four-space, and tab indentation in this file; just making it all two spaces.

]
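Note on the marker-gated constraints above: each google-re2 entry carries a PEP 508 environment marker, so an installer keeps only the lines whose marker matches the running interpreter and then satisfies all of the remaining bounds together. A minimal sketch of checking which bounds would apply locally, using the third-party packaging library (not a dependency of this project):

```python
# Illustrative only: evaluate the environment markers from the dependency
# list above against the current interpreter.
from packaging.requirements import Requirement

requirements = [
    Requirement("google-re2>=1.1.20251105; python_version == '3.14'"),
    Requirement("google-re2>=1.1.20250722; python_version == '3.13'"),
    Requirement("google-re2>=1.1; python_version == '3.12'"),
    Requirement("google-re2>=1"),
]

for req in requirements:
    # An entry with no marker always applies; otherwise the marker is
    # evaluated against the running interpreter's environment.
    applies = req.marker is None or req.marker.evaluate()
    print(f"{req}: {'applies' if applies else 'skipped'}")
```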

[project.urls]
@@ -42,11 +42,11 @@ Issues = "https://github.com/bufbuild/protovalidate-python/issues"

[dependency-groups]
dev = [
"google-re2-stubs>=0.1.1",
"mypy>=1.17.1",
"pytest>=8.4.1",
"ruff>=0.12.0",
"types-protobuf>=5.29.1.20250315",
"google-re2-stubs>=0.1.1",
"mypy>=1.17.1",
"pytest>=9.0.2",
"ruff>=0.12.0",
"types-protobuf>=5.29.1.20250315",
]

[tool.uv]
@@ -59,40 +59,40 @@ raw-options = { fallback_version = "0.0.0" }
[tool.ruff]
line-length = 120
lint.select = [
"A",
"ARG",
"B",
"C",
"DTZ",
"E",
"EM",
"F",
"FBT",
"I",
"ICN",
"N",
"PLC",
"PLE",
"PLR",
"PLW",
"Q",
"RUF",
"S",
"T",
"TID",
"UP",
"W",
"YTT",
"A",
"ARG",
"B",
"C",
"DTZ",
"E",
"EM",
"F",
"FBT",
"I",
"ICN",
"N",
"PLC",
"PLE",
"PLR",
"PLW",
"Q",
"RUF",
"S",
"T",
"TID",
"UP",
"W",
"YTT",
]
lint.ignore = [
# Ignore complexity
"C901",
"PLR0911",
"PLR0912",
"PLR0913",
"PLR0915",
# Ignore magic values - in this library, most are obvious in context.
"PLR2004",
# Ignore complexity
"C901",
"PLR0911",
"PLR0912",
"PLR0913",
"PLR0915",
# Ignore magic values - in this library, most are obvious in context.
"PLR2004",
]

[tool.ruff.lint.isort]
@@ -105,7 +105,8 @@ ban-relative-imports = "all"
# Tests can use assertions.
"test/**/*" = ["S101"]

[tool.pytest.ini_options]
[tool.pytest]
strict = true
Member Author:
Enabling strict = true meant that the parametrization IDs for the test changed above were not unique, so I switched those to use subtests instead.

# restrict testpaths to speed up test discovery
testpaths = ["test"]
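To make the review comment above concrete: with parametrization, every case needs a unique test ID, and the comment indicates that the strict setting turns the duplicate IDs in these tests into errors; with a subtests fixture (as used in the test diff below), cases are just labelled iterations of one test, so repeated labels are harmless. A hypothetical sketch of the two shapes, with invented case data that is not taken from this repository:

```python
import pytest

# Invented example data; two cases intentionally share a name.
CASES = [
    ("dup-name", "%s", ("a",)),
    ("dup-name", "%d", (1,)),
]


@pytest.mark.parametrize(("name", "fmt", "args"), CASES, ids=[c[0] for c in CASES])
def test_with_parametrize(name, fmt, args):
    # Explicit ids are expected to be unique per parameter set; per the review
    # comment above, the strict configuration flags the duplicated "dup-name" ID.
    assert fmt % args


def test_with_subtests(subtests):
    # The subtests fixture reports each iteration under its msg label,
    # so repeated labels are merely cosmetic.
    for name, fmt, args in CASES:
        with subtests.test(msg=name):
            assert fmt % args
```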

54 changes: 28 additions & 26 deletions test/test_format.py
@@ -101,34 +101,36 @@ def get_eval_error_message(test: simple_pb2.SimpleTest) -> str | None:
 env = celpy.Environment(runner_class=InterpretedRunner)


-@pytest.mark.parametrize("format_test", _format_tests)
-def test_format_successes(format_test):
+def test_format_successes(subtests: pytest.Subtests):
     """Tests success scenarios for string.format"""
-    if format_test.name in skipped_tests:
-        pytest.skip(f"skipped test: {format_test.name}")
-    ast = env.compile(format_test.expr)
-    prog = env.program(ast, functions=extra_func.make_extra_funcs())
+    for format_test in _format_tests:
+        with subtests.test(msg=format_test.name):
+            if format_test.name in skipped_tests:
+                pytest.skip(f"skipped test: {format_test.name}")
+            ast = env.compile(format_test.expr)
+            prog = env.program(ast, functions=extra_func.make_extra_funcs())

-    bindings = build_variables(format_test.bindings)
-    result = prog.evaluate(bindings)
-    expected = get_expected_result(format_test)
-    assert expected is not None, f"[{format_test.name}]: expected a success result to be defined"
-    assert result == expected
+            bindings = build_variables(format_test.bindings)
+            result = prog.evaluate(bindings)
+            expected = get_expected_result(format_test)
+            assert expected is not None, f"[{format_test.name}]: expected a success result to be defined"
+            assert result == expected


-@pytest.mark.parametrize("format_error_test", _format_error_tests)
-def test_format_errors(format_error_test):
+def test_format_errors(subtests: pytest.Subtests):
     """Tests error scenarios for string.format"""
-    if format_error_test.name in skipped_error_tests:
-        pytest.skip(f"skipped test: {format_error_test.name}")
-    ast = env.compile(format_error_test.expr)
-    prog = env.program(ast, functions=extra_func.make_extra_funcs())
-
-    bindings = build_variables(format_error_test.bindings)
-    try:
-        prog.evaluate(bindings)
-        pytest.fail(f"[{format_error_test.name}]: expected an error to be raised during evaluation")
-    except celpy.CELEvalError as e:
-        msg = get_eval_error_message(format_error_test)
-        assert msg is not None, f"[{format_error_test.name}]: expected an eval error to be defined"
-        assert str(e) == msg
+    for format_error_test in _format_error_tests:
+        with subtests.test(msg=format_error_test.name):
+            if format_error_test.name in skipped_error_tests:
+                pytest.skip(f"skipped test: {format_error_test.name}")
+            ast = env.compile(format_error_test.expr)
+            prog = env.program(ast, functions=extra_func.make_extra_funcs())
+
+            bindings = build_variables(format_error_test.bindings)
+            try:
+                prog.evaluate(bindings)
+                pytest.fail(f"[{format_error_test.name}]: expected an error to be raised during evaluation")
+            except celpy.CELEvalError as e:
+                msg = get_eval_error_message(format_error_test)
+                assert msg is not None, f"[{format_error_test.name}]: expected an eval error to be defined"
+                assert str(e) == msg
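For readers unfamiliar with cel-python, the tests above follow its standard three-step pattern: compile the CEL expression, build a program from the AST (optionally registering extra functions, as done here via extra_func.make_extra_funcs()), then evaluate against a dict of bindings. A minimal standalone sketch with an invented expression and binding, not taken from the test data in this PR:

```python
# Minimal cel-python usage sketch; expression and binding are illustrative.
import celpy

env = celpy.Environment()
ast = env.compile('"hello, " + name')  # parse the CEL source
prog = env.program(ast)                # build an executable program
result = prog.evaluate({"name": celpy.celtypes.StringType("world")})
print(result)  # hello, world
```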
2 changes: 1 addition & 1 deletion uv.lock

Some generated files are not rendered by default.