remove unused code, nocover some debug arms

This commit is contained in:
sneakers-the-rat 2024-09-11 19:30:04 -07:00
parent d31ac29294
commit bb59c9d465
Signed by untrusted user who does not match committer: jonny
GPG key ID: 6DCB96EF1E4D232D
7 changed files with 39 additions and 70 deletions

View file

@@ -174,7 +174,7 @@ class AttributeAdapter(Adapter):
"""
map = self.match()
res = map.apply(self.cls)
if self.debug:
if self.debug: # pragma: no cover - only used in development
res = self._amend_debug(res, map)
return res
@@ -203,7 +203,7 @@ class AttributeAdapter(Adapter):
def _amend_debug(
self, res: BuildResult, map: Optional[Type[AttributeMap]] = None
) -> BuildResult:
) -> BuildResult: # pragma: no cover - only used in development
map_name = "None" if map is None else map.__name__
for cls in res.classes:
cls.annotations["attribute_map"] = {"tag": "attribute_map", "value": map_name}

View file

@@ -92,7 +92,7 @@ class ClassAdapter(Adapter):
# Get vanilla top-level attributes
kwargs["attributes"].extend(self.build_attrs(self.cls))
if self.debug:
if self.debug: # pragma: no cover - only used in development
kwargs["annotations"] = {}
kwargs["annotations"]["group_adapter"] = {
"tag": "group_adapter",
@@ -254,6 +254,6 @@ class ClassAdapter(Adapter):
inlined=True,
**QUANTITY_MAP[self.cls.quantity],
)
if self.debug:
if self.debug: # pragma: no cover - only used in development
slot.annotations["group_adapter"] = {"tag": "group_adapter", "value": "self_slot"}
return slot

View file

@@ -744,7 +744,7 @@ class DatasetAdapter(ClassAdapter):
if map is not None:
res = map.apply(self.cls, res, self._get_full_name())
if self.debug:
if self.debug: # pragma: no cover - only used in development
res = self._amend_debug(res, map)
return res
@@ -771,7 +771,9 @@ class DatasetAdapter(ClassAdapter):
else:
return matches[0]
def _amend_debug(self, res: BuildResult, map: Optional[Type[DatasetMap]] = None) -> BuildResult:
def _amend_debug(
self, res: BuildResult, map: Optional[Type[DatasetMap]] = None
) -> BuildResult: # pragma: no cover - only used in development
map_name = "None" if map is None else map.__name__
for cls in res.classes:
cls.annotations["dataset_map"] = {"tag": "dataset_map", "value": map_name}

View file

@@ -70,7 +70,7 @@ class GroupAdapter(ClassAdapter):
annotations = [{"tag": "source_type", "value": "link"}]
if self.debug:
if self.debug: # pragma: no cover - only used in development
annotations.append({"tag": "group_adapter", "value": "link"})
slots = [
@@ -117,7 +117,7 @@ class GroupAdapter(ClassAdapter):
inlined_as_list=False,
)
if self.debug:
if self.debug: # pragma: no cover - only used in development
slot.annotations["group_adapter"] = {"tag": "group_adapter", "value": "container_group"}
if self.parent is not None:
@@ -162,7 +162,7 @@ class GroupAdapter(ClassAdapter):
**QUANTITY_MAP[cls.quantity],
)
if self.debug:
if self.debug: # pragma: no cover - only used in development
slot.annotations["group_adapter"] = {"tag": "group_adapter", "value": "container_slot"}
return BuildResult(slots=[slot])
@@ -214,7 +214,7 @@ class GroupAdapter(ClassAdapter):
inlined_as_list=True,
**QUANTITY_MAP[self.cls.quantity],
)
if self.debug:
if self.debug: # pragma: no cover - only used in development
slot.annotations["group_adapter"] = {"tag": "group_adapter", "value": "container_slot"}
return slot

View file

@@ -5,9 +5,7 @@ customized to support NWB models.
See class and module docstrings for details :)
"""
import pdb
import re
import sys
from dataclasses import dataclass, field
from pathlib import Path
from types import ModuleType
@@ -24,7 +22,6 @@ from linkml_runtime.linkml_model.meta import (
SlotDefinition,
SlotDefinitionName,
)
from linkml_runtime.utils.compile_python import file_text
from linkml_runtime.utils.formatutils import remove_empty_items
from linkml_runtime.utils.schemaview import SchemaView
@@ -214,15 +211,17 @@ class AfterGenerateSlot:
# merge injects/imports from the numpydantic array without using the merge method
if slot.injected_classes is None:
slot.injected_classes = NumpydanticArray.INJECTS.copy()
else:
else: # pragma: no cover - for completeness, shouldn't happen
slot.injected_classes.extend(NumpydanticArray.INJECTS.copy())
if isinstance(slot.imports, list):
if isinstance(
slot.imports, list
): # pragma: no cover - for completeness, shouldn't happen
slot.imports = (
Imports(imports=slot.imports) + NumpydanticArray.IMPORTS.model_copy()
)
elif isinstance(slot.imports, Imports):
slot.imports += NumpydanticArray.IMPORTS.model_copy()
else:
else: # pragma: no cover - for completeness, shouldn't happen
slot.imports = NumpydanticArray.IMPORTS.model_copy()
return slot
@@ -239,13 +238,15 @@ class AfterGenerateSlot:
named_injects = [ModelTypeString, _get_name, NamedString]
if slot.injected_classes is None:
slot.injected_classes = named_injects
else:
else: # pragma: no cover - for completeness, shouldn't happen
slot.injected_classes.extend([ModelTypeString, _get_name, NamedString])
if isinstance(slot.imports, list):
if isinstance(
slot.imports, list
): # pragma: no cover - for completeness, shouldn't happen
slot.imports = Imports(imports=slot.imports) + NamedImports
elif isinstance(slot.imports, Imports):
slot.imports += NamedImports
else:
else: # pragma: no cover - for completeness, shouldn't happen
slot.imports = NamedImports
return slot
@@ -268,16 +269,20 @@ class AfterGenerateClass:
if cls.cls.name == "DynamicTable":
cls.cls.bases = ["DynamicTableMixin", "ConfiguredBaseModel"]
if cls.injected_classes is None:
if (
cls.injected_classes is None
): # pragma: no cover - for completeness, shouldn't happen
cls.injected_classes = DYNAMIC_TABLE_INJECTS.copy()
else:
cls.injected_classes.extend(DYNAMIC_TABLE_INJECTS.copy())
if isinstance(cls.imports, Imports):
cls.imports += DYNAMIC_TABLE_IMPORTS
elif isinstance(cls.imports, list):
elif isinstance(
cls.imports, list
): # pragma: no cover - for completeness, shouldn't happen
cls.imports = Imports(imports=cls.imports) + DYNAMIC_TABLE_IMPORTS
else:
else: # pragma: no cover - for completeness, shouldn't happen
cls.imports = DYNAMIC_TABLE_IMPORTS.model_copy()
elif cls.cls.name == "VectorData":
cls.cls.bases = ["VectorDataMixin", "ConfiguredBaseModel"]
@@ -298,16 +303,20 @@ class AfterGenerateClass:
elif cls.cls.name == "TimeSeriesReferenceVectorData":
# in core.nwb.base, so need to inject and import again
cls.cls.bases = ["TimeSeriesReferenceVectorDataMixin", "VectorData"]
if cls.injected_classes is None:
if (
cls.injected_classes is None
): # pragma: no cover - for completeness, shouldn't happen
cls.injected_classes = TSRVD_INJECTS.copy()
else:
cls.injected_classes.extend(TSRVD_INJECTS.copy())
if isinstance(cls.imports, Imports):
cls.imports += TSRVD_IMPORTS
elif isinstance(cls.imports, list):
elif isinstance(
cls.imports, list
): # pragma: no cover - for completeness, shouldn't happen
cls.imports = Imports(imports=cls.imports) + TSRVD_IMPORTS
else:
else: # pragma: no cover - for completeness, shouldn't happen
cls.imports = TSRVD_IMPORTS.model_copy()
return cls
@@ -362,28 +371,6 @@ class AfterGenerateClass:
return cls
def compile_python(
text_or_fn: str, package_path: Path = None, module_name: str = "test"
) -> ModuleType:
"""
Compile the text or file and return the resulting module
@param text_or_fn: Python text or file name that references python file
@param package_path: Root package path. If omitted and we've got a python file,
the package is the containing
directory
@return: Compiled module
"""
python_txt = file_text(text_or_fn)
if package_path is None and python_txt != text_or_fn:
package_path = Path(text_or_fn)
spec = compile(python_txt, "<string>", "exec")
module = ModuleType(module_name)
exec(spec, module.__dict__)
sys.modules[module_name] = module
return module
def wrap_preserving_optional(annotation: str, wrap: str) -> str:
"""
Add a wrapping type to a type annotation string,
@@ -401,7 +388,5 @@ def wrap_preserving_optional(annotation: str, wrap: str) -> str:
annotation = is_optional.groups()[0]
annotation = f"Optional[{wrap}[{annotation}]]"
else:
if "Optional" in annotation:
pdb.set_trace()
annotation = f"{wrap}[{annotation}]"
return annotation

View file

@@ -1,20 +0,0 @@
"""
Types used with hdf5 io
"""
from typing import Any
from pydantic import GetCoreSchemaHandler
from pydantic_core import CoreSchema, core_schema
class HDF5_Path(str):
"""
Trivial subclass of string to indicate that it is a reference to a location within an HDF5 file
"""
@classmethod
def __get_pydantic_core_schema__(
cls, source_type: Any, handler: GetCoreSchemaHandler
) -> CoreSchema:
return core_schema.no_info_after_validator_function(cls, handler(str))

View file

@@ -5,6 +5,8 @@ Note that since this is largely a subclass, we don't test all of the functionali
because it's tested in the base linkml package.
"""
# ruff: noqa: F821 - until the tests here settle down
import re
import sys
import typing
@@ -16,7 +18,7 @@ import pytest
from numpydantic.ndarray import NDArrayMeta
from pydantic import BaseModel
from nwb_linkml.generators.pydantic import NWBPydanticGenerator, compile_python
from nwb_linkml.generators.pydantic import NWBPydanticGenerator
from ..fixtures import (
TestSchemas,