chore(lint): update Ruff ignores for project conventions and maintainability (#1184)

- Added new ignores from flake8-bugbear (`B007`, `B008`)
- Ignored `C901` (high function complexity) for now, pending review
- Maintained PyTorch conventions (`N812`, `N817`)
- Allowed `E731` (lambda assignments) for flexibility
- Consolidated existing ignores (`E402`, `E501`, `F405`, `C408`, `N812`)
- Documented rationale for each ignored rule

This keeps our linting aligned with project needs while tracking
potential fixes.
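
As a rough illustration of the rationale, hypothetical snippets (not code from this repo) showing what each newly ignored rule flags:

    import torch.nn.functional as F       # N812: lowercase module imported as non-lowercase (PyTorch convention)
    import xml.etree.ElementTree as ET    # N817: CamelCase module imported as acronym

    def fetch(settings=load_settings()):  # B008: function call in a default argument, evaluated once at def time
        ...

    for key, value in config.items():     # B007: loop control variable `value` never used in the body
        print(key)

    normalize = lambda s: s.strip()       # E731: lambda assigned to a name instead of a def

    opts = dict(retries=3, verbose=True)  # C408: dict() call where a literal would do, kept for readability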

Signed-off-by: Sébastien Han <[email protected]>
leseb authored Feb 28, 2025
1 parent 3b57d8e commit 6fa257b
Showing 33 changed files with 113 additions and 145 deletions.
2 changes: 1 addition & 1 deletion llama_stack/cli/stack/_build.py
@@ -141,7 +141,7 @@ def run_stack_build_command(args: argparse.Namespace) -> None:
completer=WordCompleter(available_providers),
complete_while_typing=True,
validator=Validator.from_callable(
- lambda x: x in available_providers,
+ lambda x: x in available_providers, # noqa: B023 - see https://github.com/astral-sh/ruff/issues/7847
error_message="Invalid provider, use <TAB> to see options",
),
)
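
A note on the `# noqa: B023` added above: flake8-bugbear's B023 flags functions defined inside a loop that reference a loop variable, since closures capture variables by reference rather than by value. A minimal illustration of the underlying pitfall (hypothetical, not repo code):

    callbacks = [lambda: print(i) for i in range(3)]
    for cb in callbacks:
        cb()    # prints 2, 2, 2 -- every closure sees the final value of i

    callbacks = [lambda i=i: print(i) for i in range(3)]    # bind the current value as a default
    for cb in callbacks:
        cb()    # prints 0, 1, 2

The suppression links https://github.com/astral-sh/ruff/issues/7847, which tracks false positives for this rule, suggesting the flagged capture is safe in this context.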
2 changes: 1 addition & 1 deletion llama_stack/cli/tests/test_stack_config.py
@@ -112,7 +112,7 @@ def test_parse_and_maybe_upgrade_config_old_format(old_config):

inference_providers = result.providers["inference"]
assert len(inference_providers) == 2
- assert set(x.provider_id for x in inference_providers) == {
+ assert {x.provider_id for x in inference_providers} == {
"remote::ollama-00",
"meta-reference-01",
}
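
The change above is one instance of a pattern repeated throughout this commit, driven by flake8-comprehensions: `set(generator)` and `set([...])` become set comprehensions or set literals (C401/C405), `[x for x in xs]` collapses to `list(xs)` (C416), and redundant `list()` calls inside `sorted()` are dropped (C414). The general shape, sketched:

    ids = set(x.provider_id for x in providers)    # C401: unnecessary generator
    ids = {x.provider_id for x in providers}       # preferred set comprehension

    unique = sorted(list(set(scores)))             # C414: unnecessary list() inside sorted()
    unique = sorted(set(scores))                   # sorted() accepts any iterable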
4 changes: 2 additions & 2 deletions llama_stack/distribution/distribution.py
@@ -13,7 +13,7 @@


def stack_apis() -> List[Api]:
- return [v for v in Api]
+ return list(Api)


class AutoRoutedApiInfo(BaseModel):
@@ -55,7 +55,7 @@ def builtin_automatically_routed_apis() -> List[AutoRoutedApiInfo]:


def providable_apis() -> List[Api]:
- routing_table_apis = set(x.routing_table_api for x in builtin_automatically_routed_apis())
+ routing_table_apis = {x.routing_table_api for x in builtin_automatically_routed_apis()}
return [api for api in Api if api not in routing_table_apis and api != Api.inspect]


4 changes: 2 additions & 2 deletions llama_stack/distribution/resolver.py
@@ -115,8 +115,8 @@ async def resolve_impls(
- flatmaps, sorts and resolves the providers in dependency order
- for each API, produces either a (local, passthrough or router) implementation
"""
- routing_table_apis = set(x.routing_table_api for x in builtin_automatically_routed_apis())
- router_apis = set(x.router_api for x in builtin_automatically_routed_apis())
+ routing_table_apis = {x.routing_table_api for x in builtin_automatically_routed_apis()}
+ router_apis = {x.router_api for x in builtin_automatically_routed_apis()}

providers_with_specs = {}

2 changes: 1 addition & 1 deletion llama_stack/distribution/ui/page/playground/rag.py
@@ -134,7 +134,7 @@ def rag_chat_page():
dict(
name="builtin::rag/knowledge_search",
args={
"vector_db_ids": [vector_db_id for vector_db_id in selected_vector_dbs],
"vector_db_ids": list(selected_vector_dbs),
},
)
],
@@ -797,10 +797,10 @@ async def _get_tool_defs(
self, toolgroups_for_turn: Optional[List[AgentToolGroup]] = None
) -> Tuple[List[ToolDefinition], Dict[str, str]]:
# Determine which tools to include
- agent_config_toolgroups = set(
- (toolgroup.name if isinstance(toolgroup, AgentToolGroupWithArgs) else toolgroup)
+ agent_config_toolgroups = {
+ toolgroup.name if isinstance(toolgroup, AgentToolGroupWithArgs) else toolgroup
for toolgroup in self.agent_config.toolgroups
- )
+ }
toolgroups_for_turn_set = (
agent_config_toolgroups
if toolgroups_for_turn is None
1 change: 0 additions & 1 deletion llama_stack/providers/inline/eval/meta_reference/eval.py
@@ -86,7 +86,6 @@ async def run_eval(
) -> Job:
task_def = self.benchmarks[benchmark_id]
dataset_id = task_def.dataset_id
- candidate = task_config.eval_candidate
scoring_functions = task_def.scoring_functions
dataset_def = await self.datasets_api.get_dataset(dataset_id=dataset_id)
validate_dataset_schema(dataset_def.dataset_schema, get_valid_schemas(Api.eval.value))
@@ -208,7 +208,6 @@ def impl():
logprobs = []
stop_reason = None

- tokenizer = self.generator.formatter.tokenizer
for token_result in self.generator.completion(request):
tokens.append(token_result.token)
if token_result.text == "<|eot_id|>":
@@ -207,7 +207,7 @@ def maybe_parse_message(maybe_json: Optional[str]) -> Optional[ProcessingMessage
return parse_message(maybe_json)
except json.JSONDecodeError:
return None
- except ValueError as e:
+ except ValueError:
return None


@@ -352,7 +352,7 @@ def run_inference(
if isinstance(obj, TaskResponse):
yield obj.result

- except GeneratorExit as e:
+ except GeneratorExit:
self.request_socket.send(encode_msg(CancelSentinel()))
while True:
obj_json = self.request_socket.send()
@@ -7,6 +7,9 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed in accordance with the terms of the Llama 3 Community License Agreement.

+ # The file gets a special treatment for now?
+ # ruff: noqa: N803
+
import unittest

import torch
@@ -264,7 +264,7 @@ async def _setup_model(
)

self.adapter_params = get_adapter_params(model)
- self._is_dora = any(["magnitude" in k for k in self.adapter_params.keys()])
+ self._is_dora = any("magnitude" in k for k in self.adapter_params.keys())

set_trainable_params(model, self.adapter_params)

@@ -133,7 +133,7 @@ async def initialize(self) -> None: ...
async def shutdown(self) -> None: ...

async def list_scoring_functions(self) -> List[ScoringFn]:
- scoring_fn_defs_list = [x for x in self.supported_fn_defs_registry.values()]
+ scoring_fn_defs_list = list(self.supported_fn_defs_registry.values())
for f in scoring_fn_defs_list:
assert f.identifier.startswith("braintrust"), (
"All braintrust scoring fn must have identifier prefixed with 'braintrust'! "
2 changes: 1 addition & 1 deletion llama_stack/providers/remote/inference/nvidia/nvidia.py
@@ -198,7 +198,7 @@ async def chat_completion(
tool_config: Optional[ToolConfig] = None,
) -> Union[ChatCompletionResponse, AsyncIterator[ChatCompletionResponseStreamChunk]]:
if tool_prompt_format:
warnings.warn("tool_prompt_format is not supported by NVIDIA NIM, ignoring")
warnings.warn("tool_prompt_format is not supported by NVIDIA NIM, ignoring", stacklevel=2)

await check_health(self._config) # this raises errors

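
The `stacklevel=2` added here (and in openai_utils.py below) addresses flake8-bugbear's B028: with the default `stacklevel=1`, a warning is attributed to the library line that calls `warn()`; `stacklevel=2` attributes it to the caller, which is what users can act on. A small sketch with hypothetical names:

    import warnings

    def deprecated_api():
        # reported at the caller's line, not at this one
        warnings.warn("deprecated_api() is going away", DeprecationWarning, stacklevel=2)

    deprecated_api()    # the warning points here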
4 changes: 2 additions & 2 deletions llama_stack/providers/remote/inference/nvidia/openai_utils.py
@@ -106,7 +106,7 @@ async def convert_chat_completion_request(
payload.update(temperature=strategy.temperature)
elif isinstance(strategy, TopKSamplingStrategy):
if strategy.top_k != -1 and strategy.top_k < 1:
warnings.warn("top_k must be -1 or >= 1")
warnings.warn("top_k must be -1 or >= 1", stacklevel=2)
nvext.update(top_k=strategy.top_k)
elif isinstance(strategy, GreedySamplingStrategy):
nvext.update(top_k=-1)
@@ -168,7 +168,7 @@ def convert_completion_request(
payload.update(top_p=request.sampling_params.top_p)
elif request.sampling_params.strategy == "top_k":
if request.sampling_params.top_k != -1 and request.sampling_params.top_k < 1:
warnings.warn("top_k must be -1 or >= 1")
warnings.warn("top_k must be -1 or >= 1", stacklevel=2)
nvext.update(top_k=request.sampling_params.top_k)
elif request.sampling_params.strategy == "greedy":
nvext.update(top_k=-1)
9 changes: 3 additions & 6 deletions llama_stack/providers/tests/eval/test_eval.py
@@ -39,12 +39,11 @@ async def test_benchmarks_list(self, eval_stack):

@pytest.mark.asyncio
async def test_eval_evaluate_rows(self, eval_stack, inference_model, judge_model):
- eval_impl, benchmarks_impl, datasetio_impl, datasets_impl, models_impl = (
+ eval_impl, benchmarks_impl, datasetio_impl, datasets_impl = (
eval_stack[Api.eval],
eval_stack[Api.benchmarks],
eval_stack[Api.datasetio],
eval_stack[Api.datasets],
- eval_stack[Api.models],
)

await register_dataset(datasets_impl, for_generation=True, dataset_id="test_dataset_for_eval")
@@ -92,11 +91,10 @@ async def test_eval_evaluate_rows(self, eval_stack, inference_model, judge_model

@pytest.mark.asyncio
async def test_eval_run_eval(self, eval_stack, inference_model, judge_model):
- eval_impl, benchmarks_impl, datasets_impl, models_impl = (
+ eval_impl, benchmarks_impl, datasets_impl = (
eval_stack[Api.eval],
eval_stack[Api.benchmarks],
eval_stack[Api.datasets],
- eval_stack[Api.models],
)

await register_dataset(datasets_impl, for_generation=True, dataset_id="test_dataset_for_eval")
@@ -131,11 +129,10 @@ async def test_eval_run_eval(self, eval_stack, inference_model, judge_model):

@pytest.mark.asyncio
async def test_eval_run_benchmark_eval(self, eval_stack, inference_model):
- eval_impl, benchmarks_impl, datasets_impl, models_impl = (
+ eval_impl, benchmarks_impl, datasets_impl = (
eval_stack[Api.eval],
eval_stack[Api.benchmarks],
eval_stack[Api.datasets],
- eval_stack[Api.models],
)

response = await datasets_impl.list_datasets()
90 changes: 42 additions & 48 deletions llama_stack/providers/tests/report.py
@@ -18,54 +18,48 @@
INFERENCE_APIS = ["chat_completion"]
FUNCTIONALITIES = ["streaming", "structured_output", "tool_calling"]
SUPPORTED_MODELS = {
"ollama": set(
[
CoreModelId.llama3_1_8b_instruct.value,
CoreModelId.llama3_1_8b_instruct.value,
CoreModelId.llama3_1_70b_instruct.value,
CoreModelId.llama3_1_70b_instruct.value,
CoreModelId.llama3_1_405b_instruct.value,
CoreModelId.llama3_1_405b_instruct.value,
CoreModelId.llama3_2_1b_instruct.value,
CoreModelId.llama3_2_1b_instruct.value,
CoreModelId.llama3_2_3b_instruct.value,
CoreModelId.llama3_2_3b_instruct.value,
CoreModelId.llama3_2_11b_vision_instruct.value,
CoreModelId.llama3_2_11b_vision_instruct.value,
CoreModelId.llama3_2_90b_vision_instruct.value,
CoreModelId.llama3_2_90b_vision_instruct.value,
CoreModelId.llama3_3_70b_instruct.value,
CoreModelId.llama_guard_3_8b.value,
CoreModelId.llama_guard_3_1b.value,
]
),
"fireworks": set(
[
CoreModelId.llama3_1_8b_instruct.value,
CoreModelId.llama3_1_70b_instruct.value,
CoreModelId.llama3_1_405b_instruct.value,
CoreModelId.llama3_2_1b_instruct.value,
CoreModelId.llama3_2_3b_instruct.value,
CoreModelId.llama3_2_11b_vision_instruct.value,
CoreModelId.llama3_2_90b_vision_instruct.value,
CoreModelId.llama3_3_70b_instruct.value,
CoreModelId.llama_guard_3_8b.value,
CoreModelId.llama_guard_3_11b_vision.value,
]
),
"together": set(
[
CoreModelId.llama3_1_8b_instruct.value,
CoreModelId.llama3_1_70b_instruct.value,
CoreModelId.llama3_1_405b_instruct.value,
CoreModelId.llama3_2_3b_instruct.value,
CoreModelId.llama3_2_11b_vision_instruct.value,
CoreModelId.llama3_2_90b_vision_instruct.value,
CoreModelId.llama3_3_70b_instruct.value,
CoreModelId.llama_guard_3_8b.value,
CoreModelId.llama_guard_3_11b_vision.value,
]
),
"ollama": {
CoreModelId.llama3_1_8b_instruct.value,
CoreModelId.llama3_1_8b_instruct.value,
CoreModelId.llama3_1_70b_instruct.value,
CoreModelId.llama3_1_70b_instruct.value,
CoreModelId.llama3_1_405b_instruct.value,
CoreModelId.llama3_1_405b_instruct.value,
CoreModelId.llama3_2_1b_instruct.value,
CoreModelId.llama3_2_1b_instruct.value,
CoreModelId.llama3_2_3b_instruct.value,
CoreModelId.llama3_2_3b_instruct.value,
CoreModelId.llama3_2_11b_vision_instruct.value,
CoreModelId.llama3_2_11b_vision_instruct.value,
CoreModelId.llama3_2_90b_vision_instruct.value,
CoreModelId.llama3_2_90b_vision_instruct.value,
CoreModelId.llama3_3_70b_instruct.value,
CoreModelId.llama_guard_3_8b.value,
CoreModelId.llama_guard_3_1b.value,
},
"fireworks": {
CoreModelId.llama3_1_8b_instruct.value,
CoreModelId.llama3_1_70b_instruct.value,
CoreModelId.llama3_1_405b_instruct.value,
CoreModelId.llama3_2_1b_instruct.value,
CoreModelId.llama3_2_3b_instruct.value,
CoreModelId.llama3_2_11b_vision_instruct.value,
CoreModelId.llama3_2_90b_vision_instruct.value,
CoreModelId.llama3_3_70b_instruct.value,
CoreModelId.llama_guard_3_8b.value,
CoreModelId.llama_guard_3_11b_vision.value,
},
"together": {
CoreModelId.llama3_1_8b_instruct.value,
CoreModelId.llama3_1_70b_instruct.value,
CoreModelId.llama3_1_405b_instruct.value,
CoreModelId.llama3_2_3b_instruct.value,
CoreModelId.llama3_2_11b_vision_instruct.value,
CoreModelId.llama3_2_90b_vision_instruct.value,
CoreModelId.llama3_3_70b_instruct.value,
CoreModelId.llama_guard_3_8b.value,
CoreModelId.llama_guard_3_11b_vision.value,
},
}


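
Incidentally, the `ollama` block lists several model IDs twice, in both the old and new form. Set literals deduplicate, so the repetition is harmless, though a rule such as flake8-bugbear's B033 (duplicate set values) would flag it; this commit leaves those entries as-is. For example:

    assert {1, 1, 2} == {1, 2}    # set literals collapse duplicates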
6 changes: 0 additions & 6 deletions llama_stack/providers/tests/scoring/test_scoring.py
@@ -45,13 +45,11 @@ async def test_scoring_score(self, scoring_stack):
scoring_functions_impl,
datasetio_impl,
datasets_impl,
- models_impl,
) = (
scoring_stack[Api.scoring],
scoring_stack[Api.scoring_functions],
scoring_stack[Api.datasetio],
scoring_stack[Api.datasets],
- scoring_stack[Api.models],
)
scoring_fns_list = await scoring_functions_impl.list_scoring_functions()
provider_id = scoring_fns_list[0].provider_id
@@ -102,13 +100,11 @@ async def test_scoring_score_with_params_llm_as_judge(
scoring_functions_impl,
datasetio_impl,
datasets_impl,
- models_impl,
) = (
scoring_stack[Api.scoring],
scoring_stack[Api.scoring_functions],
scoring_stack[Api.datasetio],
scoring_stack[Api.datasets],
- scoring_stack[Api.models],
)
await register_dataset(datasets_impl, for_rag=True)
response = await datasets_impl.list_datasets()
@@ -163,13 +159,11 @@ async def test_scoring_score_with_aggregation_functions(
scoring_functions_impl,
datasetio_impl,
datasets_impl,
- models_impl,
) = (
scoring_stack[Api.scoring],
scoring_stack[Api.scoring_functions],
scoring_stack[Api.datasetio],
scoring_stack[Api.datasets],
- scoring_stack[Api.models],
)
await register_dataset(datasets_impl, for_rag=True)
rows = await datasetio_impl.get_rows_paginated(
6 changes: 4 additions & 2 deletions llama_stack/providers/utils/inference/openai_compat.py
@@ -605,7 +605,7 @@ def convert_tool_call(
tool_name=tool_call.function.name,
arguments=json.loads(tool_call.function.arguments),
)
- except Exception as e:
+ except Exception:
return UnparseableToolCall(
call_id=tool_call.id or "",
tool_name=tool_call.function.name or "",
@@ -876,7 +876,9 @@ async def convert_openai_chat_completion_stream(
# it is possible to have parallel tool calls in stream, but
# ChatCompletionResponseEvent only supports one per stream
if len(choice.delta.tool_calls) > 1:
warnings.warn("multiple tool calls found in a single delta, using the first, ignoring the rest")
warnings.warn(
"multiple tool calls found in a single delta, using the first, ignoring the rest", stacklevel=2
)

if not enable_incremental_tool_calls:
yield ChatCompletionResponseStreamChunk(
2 changes: 1 addition & 1 deletion llama_stack/providers/utils/kvstore/redis/redis.py
@@ -36,7 +36,7 @@ async def get(self, key: str) -> Optional[str]:
value = await self.redis.get(key)
if value is None:
return None
- ttl = await self.redis.ttl(key)
+ await self.redis.ttl(key)
return value

async def delete(self, key: str) -> None:
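
The redis change above is a typical F841 cleanup (local variable assigned but never used): the unused `ttl` binding is dropped while the awaited call is kept. Sketched with hypothetical names:

    result = do_work()    # F841 if `result` is never read
    do_work()             # fix: drop the binding; keep the call only if its side effects matter

The same rule motivates the earlier `except ValueError as e:` -> `except ValueError:` changes, where the bound exception was never used.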
2 changes: 1 addition & 1 deletion llama_stack/providers/utils/scoring/aggregation_utils.py
@@ -32,7 +32,7 @@ def aggregate_categorical_count(
scoring_results: List[ScoringResultRow],
) -> Dict[str, Any]:
scores = [str(r["score"]) for r in scoring_results]
- unique_scores = sorted(list(set(scores)))
+ unique_scores = sorted(set(scores))
return {"categorical_count": {s: scores.count(s) for s in unique_scores}}


2 changes: 1 addition & 1 deletion llama_stack/providers/utils/scoring/base_scoring_fn.py
@@ -66,7 +66,7 @@ def __str__(self) -> str:
return self.__class__.__name__

def get_supported_scoring_fn_defs(self) -> List[ScoringFn]:
- return [x for x in self.supported_fn_defs_registry.values()]
+ return list(self.supported_fn_defs_registry.values())

def register_scoring_fn_def(self, scoring_fn: ScoringFn) -> None:
if scoring_fn.identifier in self.supported_fn_defs_registry:
2 changes: 1 addition & 1 deletion llama_stack/scripts/distro_codegen.py
@@ -99,7 +99,7 @@ def collect_template_dependencies(template_dir: Path) -> tuple[str | None, list[
template = template_func()
normal_deps, special_deps = get_provider_dependencies(template.providers)
# Combine all dependencies in order: normal deps, special deps, server deps
- all_deps = sorted(list(set(normal_deps + SERVER_DEPENDENCIES))) + sorted(list(set(special_deps)))
+ all_deps = sorted(set(normal_deps + SERVER_DEPENDENCIES)) + sorted(set(special_deps))

return template.name, all_deps
except Exception:
