This commit is contained in:
sneakers-the-rat 2024-07-09 03:32:37 -07:00
parent f4d397cde1
commit b6af8c9718
Signed by untrusted user who does not match committer: jonny
GPG key ID: 6DCB96EF1E4D232D
8 changed files with 31 additions and 23 deletions

View file

@@ -1,11 +1,17 @@
 """
 Test fixtures primarily for doctests for adapters
 """
 import re
 import textwrap
-from doctest import NORMALIZE_WHITESPACE, ELLIPSIS
-from sybil import Document
-from sybil import Sybil, Region
+from doctest import ELLIPSIS, NORMALIZE_WHITESPACE
+from typing import Generator
+
+import yaml
+from sybil import Document, Example, Region, Sybil
 from sybil.parsers.codeblock import PythonCodeBlockParser
 from sybil.parsers.doctest import DocTestParser
-import yaml
 from nwb_linkml import adapters

 # Test adapter generation examples
@@ -24,7 +30,7 @@ def _strip_nwb(nwb: str) -> str:
     return nwb


-def test_adapter_block(example):
+def test_adapter_block(example: Example) -> None:
     """
     The linkml generated from a nwb example input should match
     that provided in the docstring.
@@ -44,10 +50,13 @@ def test_adapter_block(example):
     assert generated == expected


-def parse_adapter_blocks(document: Document):
+def parse_adapter_blocks(document: Document) -> Generator[Region, None, None]:
+    """
+    Parse blocks with adapter directives, yield to test with :func:`.test_adapter_block`
+    """
     for start_match, end_match, source in document.find_region_sources(ADAPTER_START, ADAPTER_END):
         # parse
-        sections = re.split(r":\w+?:", source, re.MULTILINE)
+        sections = re.split(r":\w+?:", source, flags=re.MULTILINE)
         sections = [textwrap.dedent(section).strip() for section in sections]
         sections[1] = _strip_nwb(sections[1])
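The `re.split` change above fixes a subtle API misuse; a minimal demonstration (not from the commit) of why the keyword matters:

```python
import re

text = ":nwb: some schema :linkml: some model"

# re.split's signature is re.split(pattern, string, maxsplit=0, flags=0).
# A flag passed positionally is consumed as maxsplit -- re.MULTILINE == 8 --
# so the old call silently capped the split at 8 fields instead of enabling
# multiline matching. With few matches the two calls return the same result,
# which is what made the bug easy to miss.
re.split(r":\w+?:", text, re.MULTILINE)        # actually maxsplit=8, flags=0
re.split(r":\w+?:", text, flags=re.MULTILINE)  # the intended call
```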
@@ -56,9 +65,7 @@ def parse_adapter_blocks(document: Document):
 adapter_parser = Sybil(
-    parsers=[
-        parse_adapter_blocks
-    ],
+    parsers=[parse_adapter_blocks],
     patterns=["adapters/*.py"],
 )
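For readers unfamiliar with Sybil's plugin model: a parser is any callable that takes a `Document` and yields `Region`s, which is why `parse_adapter_blocks` can be passed directly in `parsers=[...]`. A minimal sketch mirroring the API used above (the directive markers and all names here are illustrative, not from this repo):

```python
import re

from sybil import Document, Example, Region, Sybil

UPPER_START = re.compile(r"\.\. upper-block::")
UPPER_END = re.compile(r"\n\s*\n")


def evaluate_upper(example: Example) -> None:
    # the parsed payload is whatever the parser stored in the Region
    assert example.parsed == example.parsed.upper()


def parse_upper_blocks(document: Document):
    # find_region_sources yields (start_match, end_match, source) tuples;
    # each Region carries its span, a parsed value, and an evaluator
    for start_match, end_match, source in document.find_region_sources(UPPER_START, UPPER_END):
        yield Region(start_match.start(), end_match.end(), source, evaluate_upper)


upper_parser = Sybil(parsers=[parse_upper_blocks], patterns=["*.py"])
# in a conftest.py, collection is wired up with:
# pytest_collect_file = upper_parser.pytest()
```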

View file

@@ -16,6 +16,7 @@ from typing import (
     Union,
 )

+from linkml_runtime.dumpers import yaml_dumper
 from linkml_runtime.linkml_model import (
     ClassDefinition,
     Definition,
@@ -23,7 +24,6 @@ from linkml_runtime.linkml_model import (
     SlotDefinition,
     TypeDefinition,
 )
-from linkml_runtime.dumpers import yaml_dumper
 from pydantic import BaseModel

 from nwb_schema_language import Attribute, Dataset, Group, Schema

View file

@@ -3,16 +3,15 @@ Adapters to linkML classes
 """

 from abc import abstractmethod
-from typing import Type, TypeVar, List, Optional
+from typing import List, Optional, Type, TypeVar

 from linkml_runtime.linkml_model import ClassDefinition, SlotDefinition
 from pydantic import field_validator

 from nwb_linkml.adapters.adapter import Adapter, BuildResult
 from nwb_linkml.maps import QUANTITY_MAP
 from nwb_linkml.maps.naming import camel_to_snake
-from nwb_schema_language import CompoundDtype, Dataset, DTypeType, Group, ReferenceDtype, FlatDtype
+from nwb_schema_language import CompoundDtype, Dataset, DTypeType, FlatDtype, Group, ReferenceDtype

 T = TypeVar("T", bound=Type[Dataset] | Type[Group])
 TI = TypeVar("TI", bound=Dataset | Group)
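The two `TypeVar`s above are bound differently: `T` ranges over the class objects themselves, `TI` over their instances. A small sketch of what each bound permits, with stub classes standing in for the real `Dataset`/`Group` models:

```python
from typing import List, Type, TypeVar


class Dataset: ...  # stand-ins for the nwb_schema_language models
class Group: ...


T = TypeVar("T", bound=Type[Dataset] | Type[Group])  # accepts the classes
TI = TypeVar("TI", bound=Dataset | Group)            # accepts instances


def filter_by_type(cls: T, items: List[TI]) -> List[TI]:
    # keep only items that are instances of the given class; the return
    # type preserves the element type of the input list
    return [item for item in items if isinstance(item, cls)]


groups = filter_by_type(Group, [Group(), Dataset(), Group()])
assert len(groups) == 2
```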

View file

@@ -1,6 +1,7 @@
 """
 Adapter for NWB datasets to linkml Classes
 """
+
 from abc import abstractmethod
 from typing import ClassVar, Optional, Type
@@ -14,7 +15,7 @@ from nwb_linkml.adapters.classes import ClassAdapter
 from nwb_linkml.maps import QUANTITY_MAP, Map
 from nwb_linkml.maps.dtype import flat_to_linkml
 from nwb_linkml.maps.naming import camel_to_snake
-from nwb_schema_language import Dataset, CompoundDtype
+from nwb_schema_language import CompoundDtype, Dataset


 class DatasetMap(Map):
@@ -141,9 +142,9 @@ class MapScalarAttributes(DatasetMap):
     :linkml:
       classes:
       - name: starting_time
-        description: Timestamp of the first sample in seconds. When timestamps are uniformly
-          spaced, the timestamp of the first sample can be specified and all subsequent
-          ones calculated from the sampling rate attribute.
+        description: Timestamp of the first sample in seconds. When timestamps are
+          uniformly spaced, the timestamp of the first sample can be specified and all
+          subsequent ones calculated from the sampling rate attribute.
         attributes:
           name:
             name: name
@@ -328,8 +329,8 @@ class MapArraylike(DatasetMap):
         - null
         - null
         - null
-        doc: Binary data representing images across frames. If data are stored in an external
-          file, this should be an empty 3D array.
+        doc: Binary data representing images across frames. If data are stored in an
+          external file, this should be an empty 3D array.
     :linkml:
       slots:
       - name: data
@@ -754,6 +755,7 @@ def is_1d(cls: Dataset) -> bool:
 def is_compound(cls: Dataset) -> bool:
+    """Check if dataset has a compound dtype"""
     return (
         isinstance(cls.dtype, list)
         and len(cls.dtype) > 0
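As context (not part of this commit): in nwb-schema-language a compound dtype is a list of named sub-dtypes rather than a single flat type, which is what the predicate above tests for. A hedged construction sketch, assuming the generated `Dataset` and `CompoundDtype` pydantic models accept `name`, `doc`, and `dtype` fields:

```python
from nwb_schema_language import CompoundDtype, Dataset

# a compound dtype: a list of named sub-dtypes (roughly a table row)
compound = Dataset(
    name="sync",
    doc="example compound dataset",
    dtype=[
        CompoundDtype(name="time", doc="timestamp", dtype="float64"),
        CompoundDtype(name="channel", doc="channel id", dtype="int32"),
    ],
)

# a flat dtype: a single scalar type name
flat = Dataset(name="values", doc="example flat dataset", dtype="float32")

assert isinstance(compound.dtype, list) and len(compound.dtype) > 0
assert not isinstance(flat.dtype, list)
```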

View file

@@ -36,7 +36,7 @@ def tmp_output_dir() -> Path:
     path = Path(__file__).parent.resolve() / "__tmp__"
     if path.exists():
         for subdir in path.iterdir():
-            if subdir.name == 'git':
+            if subdir.name == "git":
                 # don't wipe out git repos every time, they don't rly change
                 continue
             elif subdir.is_file() and subdir.parent != path:

View file

@@ -1,6 +1,6 @@
 import pytest
-from nwb_linkml.adapters.dataset import MapScalar
+
+from nwb_linkml.adapters.dataset import MapScalar
 from nwb_schema_language import Dataset

View file

@@ -265,4 +265,3 @@ class Dataset(DtypeMixin):
     dtype: Optional[Union[List[CompoundDtype], FlatDtype, ReferenceDtype]] = Field(
         default_factory=list
     )
-
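An aside (not from the commit): the `default_factory=list` idiom shown above gives each model instance its own fresh list, avoiding a shared mutable default. A minimal sketch with plain pydantic:

```python
from pydantic import BaseModel, Field


class Example(BaseModel):
    items: list = Field(default_factory=list)  # new list per instance


a, b = Example(), Example()
a.items.append(1)
assert b.items == []  # b is unaffected; nothing is shared
```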

View file

@@ -24,6 +24,7 @@ target-version = "py311"
 include = ["nwb_linkml/**/*.py", "nwb_schema_language/src/**/*.py", "pyproject.toml"]
 exclude = [
     "docs",
+    "nwb_linkml/src/nwb_linkml/models/**/*.py",
     "nwb_schema_language/src/nwb_schema_language/datamodel/nwb_schema_language.py",
     "nwb_schema_language/src/nwb_schema_language/datamodel/nwb_schema_pydantic.py",
     "tests/__tmp__/**/*"