Remove split from schema adapter because it turned out to be a bad idea; we just use tree_root instead.

This commit is contained in:
sneakers-the-rat 2023-10-11 18:32:31 -07:00
parent 615b006e4a
commit c4d42cdacf
3 changed files with 13 additions and 103 deletions

View file

@@ -31,12 +31,10 @@ class NamespacesAdapter(Adapter):
imported: List['NamespacesAdapter'] = Field(default_factory=list) imported: List['NamespacesAdapter'] = Field(default_factory=list)
_imports_populated: bool = PrivateAttr(False) _imports_populated: bool = PrivateAttr(False)
_split: bool = PrivateAttr(False)
def __init__(self, **kwargs): def __init__(self, **kwargs):
super(NamespacesAdapter, self).__init__(**kwargs) super(NamespacesAdapter, self).__init__(**kwargs)
self._populate_schema_namespaces() self._populate_schema_namespaces()
self.split = self._split
@classmethod @classmethod
def from_yaml(cls, path:Path) -> 'NamespacesAdapter': def from_yaml(cls, path:Path) -> 'NamespacesAdapter':
@@ -121,29 +119,6 @@ class NamespacesAdapter(Adapter):
return sch_result return sch_result
@property
def split(self) -> bool:
"""
Sets the :attr:`.SchemaAdapter.split` attribute for all contained and imported schema
Args:
split (bool): Set the generated schema to be split or not
Returns:
bool: whether the schema are set to be split!
"""
return self._split
@split.setter
def split(self, split):
for sch in self.schemas:
sch.split = split
for ns in self.imported:
for sch in ns.schemas:
sch.split = split
self._split = split
def _populate_schema_namespaces(self): def _populate_schema_namespaces(self):
# annotate for each schema which namespace imports it # annotate for each schema which namespace imports it
for sch in self.schemas: for sch in self.schemas:

View file

@@ -1,5 +1,5 @@
""" """
Since NWB doesn't necessarily have a term for a single nwb schema file, we're going I don't know if NWB necessarily has a term for a single nwb schema file, so we're going
to call them "schema" objects to call them "schema" objects
""" """
from typing import Optional, List, TYPE_CHECKING, Type from typing import Optional, List, TYPE_CHECKING, Type
@@ -9,8 +9,6 @@ from pydantic import Field, PrivateAttr
from nwb_linkml.adapters.adapter import Adapter, BuildResult from nwb_linkml.adapters.adapter import Adapter, BuildResult
from nwb_linkml.adapters.dataset import DatasetAdapter from nwb_linkml.adapters.dataset import DatasetAdapter
from nwb_linkml.adapters.group import GroupAdapter from nwb_linkml.adapters.group import GroupAdapter
if TYPE_CHECKING:
pass
from nwb_schema_language import Group, Dataset from nwb_schema_language import Group, Dataset
from typing import NamedTuple from typing import NamedTuple
@@ -37,10 +35,6 @@ class SchemaAdapter(Adapter):
None, None,
description="Version of schema, populated by NamespacesAdapter since individual schema files dont know their version in NWB Schema Lang" description="Version of schema, populated by NamespacesAdapter since individual schema files dont know their version in NWB Schema Lang"
) )
split: bool = Field(
False,
description="Split anonymous subclasses into a separate schema file"
)
_created_classes: List[Type[Group | Dataset]] = PrivateAttr(default_factory=list) _created_classes: List[Type[Group | Dataset]] = PrivateAttr(default_factory=list)
@property @property
@@ -81,78 +75,18 @@ class SchemaAdapter(Adapter):
if len(res.slots) > 0: if len(res.slots) > 0:
raise RuntimeError('Generated schema in this translation can only have classes, all slots should be attributes within a class') raise RuntimeError('Generated schema in this translation can only have classes, all slots should be attributes within a class')
if self.split: sch = SchemaDefinition(
sch_split = self.split_subclasses(res) name = self.name,
return sch_split id = self.name,
imports = [i.name if isinstance(i, SchemaAdapter) else i for i in self.imports ],
else: classes=res.classes,
slots=res.slots,
sch = SchemaDefinition( types=res.types,
name = self.name, version=self.version
id = self.name,
imports = [i.name if isinstance(i, SchemaAdapter) else i for i in self.imports ],
classes=res.classes,
slots=res.slots,
types=res.types,
version=self.version
)
# every schema needs the language elements
sch.imports.append('.'.join([self.namespace, 'nwb.language']))
return BuildResult(schemas=[sch])
def split_subclasses(self, classes: BuildResult) -> BuildResult:
"""
Split the generated classes into top-level "main" classes and
nested/anonymous "split" classes.
Args:
classes (BuildResult): A Build result object containing the classes
for the schema
Returns:
:class:`.SplitSchema`
"""
# just split by the presence or absence of __
main_classes = [c for c in classes.classes if '__' not in c.name]
split_classes = [c for c in classes.classes if '__' in c.name]
split_sch_name = '.'.join([self.name, 'include'])
imports = [i.name if isinstance(i, SchemaAdapter) else i for i in self.imports ]
imports.append('nwb.language')
# need to mutually import the two schemas because the subclasses
# could refer to the main classes
main_imports = imports
if len(split_classes)>0:
main_imports.append(split_sch_name)
imports.append(self.name)
main_sch = SchemaDefinition(
name=self.name,
id=self.name,
imports=main_imports,
classes=main_classes,
slots=classes.slots,
types=classes.types
) )
# every schema needs the language elements
split_sch = SchemaDefinition( sch.imports.append('.'.join([self.namespace, 'nwb.language']))
name=split_sch_name, return BuildResult(schemas=[sch])
id=split_sch_name,
imports=imports,
classes=split_classes,
slots=classes.slots,
types=classes.types
)
if len(split_classes) > 0:
res = BuildResult(
schemas=[main_sch, split_sch]
)
else:
res = BuildResult(
schemas=[main_sch]
)
return res
@property @property

View file

@@ -10,6 +10,7 @@ from ..fixtures import tmp_output_dir, set_config_vars, data_dir
from nwb_linkml.io.hdf5 import HDF5IO from nwb_linkml.io.hdf5 import HDF5IO
from nwb_linkml.io.hdf5 import truncate_file from nwb_linkml.io.hdf5 import truncate_file
@pytest.mark.skip()
@pytest.mark.parametrize('dset', ['aibs.nwb']) @pytest.mark.parametrize('dset', ['aibs.nwb'])
def test_hdf_read(data_dir, dset): def test_hdf_read(data_dir, dset):
NWBFILE = data_dir / dset NWBFILE = data_dir / dset