Regenerate nwb_schema_language models

sneakers-the-rat 2023-10-09 15:06:53 -07:00
parent 6da6ef281f
commit 39c600fda1
20 changed files with 771 additions and 622 deletions

View file

@@ -20,7 +20,8 @@ extensions = [
     'sphinx.ext.napoleon',
     'sphinx.ext.autodoc',
     'sphinxcontrib.autodoc_pydantic',
-    'sphinx.ext.intersphinx'
+    'sphinx.ext.intersphinx',
+    'sphinx.ext.doctest'
 ]
 
 templates_path = ['_templates']
@@ -68,3 +69,10 @@ graphviz_output_format = "svg"
 autodoc_pydantic_model_show_json_error_strategy = 'coerce'
 autodoc_pydantic_model_show_json = False
 autodoc_mock_imports = ['nptyping']
+
+# doctest
+doctest_global_setup = """
+from linkml_runtime.linkml_model import ClassDefinition, SlotDefinition, SchemaDefinition
+from pydantic import BaseModel, Field
+import numpy as np
+"""

View file

@@ -72,7 +72,8 @@ build-backend = "poetry.core.masonry.api"
 addopts = [
     "--cov=nwb_linkml",
     "--cov-append",
-    "--cov-config=.coveragerc"
+    "--cov-config=.coveragerc",
+    "--doctest-modules"
 ]
 testpaths = [
     "tests",

View file

@@ -1,4 +1,6 @@
+from nwb_linkml.adapters.adapter import Adapter, BuildResult
 from nwb_linkml.adapters.namespaces import NamespacesAdapter
 from nwb_linkml.adapters.classes import ClassAdapter
 from nwb_linkml.adapters.group import GroupAdapter
 from nwb_linkml.adapters.schema import SchemaAdapter

View file

@@ -1,10 +1,11 @@
 """
 Base class for adapters
 """
+import pdb
 from abc import abstractmethod
 import warnings
 from dataclasses import dataclass, field
-from typing import List, Dict, Type, Generator, Any, Tuple, Optional, TypeVar, TypeVarTuple, Unpack
+from typing import List, Dict, Type, Generator, Any, Tuple, Optional, TypeVar, TypeVarTuple, Unpack, Literal
 from pydantic import BaseModel, Field, validator
 from linkml_runtime.linkml_model import Element, SchemaDefinition, ClassDefinition, SlotDefinition, TypeDefinition
@@ -35,15 +36,8 @@ class BuildResult:
         self.schemas.extend(self._dedupe(self.schemas, other.schemas))
         self.classes.extend(self._dedupe(self.classes, other.classes))
-        # existing_names = [c.name for c in self.classes]
-        # for newc in other.classes:
-        #     if newc.name in existing_names:
-        #         warnings.warn(f'Not creating duplicate class for {newc.name}')
-        #         continue
-        #     self.classes.append(newc)
-        # self.classes.extend(other.classes)
-        self.slots.extend(other.slots)
-        self.types.extend(other.types)
+        self.slots.extend(self._dedupe(self.slots, other.slots))
+        self.types.extend(self._dedupe(self.types, other.types))
         return self
 
     def __repr__(self): # pragma: no cover
@@ -76,7 +70,13 @@ class Adapter(BaseModel):
         Generate the corresponding linkML element for this adapter
         """
 
-    def walk(self, input: BaseModel | list | dict):
+    def walk(self, input: BaseModel | list | dict) -> Generator[BaseModel | Any | None, None, None]:
+        """
+        Iterate through all items in the given model.
+
+        Could be a staticmethod or a function, but bound to adapters to make it available to them :)
+        """
         yield input
 
         if isinstance(input, BaseModel):
@@ -92,7 +92,7 @@ class Adapter(BaseModel):
                 if isinstance(val, (BaseModel, dict, list)):
                     yield from self.walk(val)
 
-        elif isinstance(input, dict):
+        elif isinstance(input, dict): # pragma: no cover - not used in our adapters, but necessary for logical completeness
             for key, val in input.items():
                 yield (key, val)
                 if isinstance(val, (BaseModel, dict, list)):
@@ -108,12 +108,45 @@ class Adapter(BaseModel):
             pass
 
     def walk_fields(self, input: BaseModel | list | dict, field: str | Tuple[str, ...]):
+        """
+        Recursively walk input for fields that match ``field``
+
+        Args:
+            input (:class:`pydantic.BaseModel`): Model to walk (or a list or dictionary to walk too)
+            field (str, Tuple[str, ...]): Name(s) of the field(s) to match
+        """
         if isinstance(field, str):
             field = (field,)
         for item in self.walk(input):
             if isinstance(item, tuple) and item[0] in field and item[1] is not None:
                 yield item[1]
+
+    def walk_field_values(self, input: BaseModel | list | dict, field: str, value: Optional[Any] = None) -> Generator[BaseModel, None, None]:
+        """
+        Recursively walk input for **models** that contain a ``field`` as a direct child with a value matching ``value``
+
+        Args:
+            input (:class:`pydantic.BaseModel`): Model to walk
+            field (str): Name of field - unlike :meth:`.walk_fields`, only one field can be given
+            value (Any): Value to match for the given field. If ``None``, return all models that have the field
+
+        Returns:
+            :class:`pydantic.BaseModel` the matching model
+        """
+        for item in self.walk(input):
+            if isinstance(item, BaseModel):
+                if field in item.model_fields:
+                    if value is None:
+                        yield item
+                    field_value = item.model_dump().get(field, None)
+                    if value == field_value:
+                        yield item
 
     def walk_types(self, input: BaseModel | list | dict, get_type: T | List[Unpack[Ts]] | Tuple[Unpack[T]]) -> Generator[T, None, None]:
         if not isinstance(get_type, (list, tuple)):
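
For orientation, a sketch of how the walk traversal behaves on an arbitrary
pydantic tree - the models here are invented for illustration and are not part
of nwb_linkml:

    from typing import List, Optional
    from pydantic import BaseModel

    class Leaf(BaseModel):
        dtype: Optional[str] = None

    class Tree(BaseModel):
        children: List[Leaf] = []

    tree = Tree(children=[Leaf(dtype="int32"), Leaf()])
    # walk() yields every model, container, and (field, value) pair it encounters;
    # walk_field_values(tree, 'dtype', value='int32') yields just the first Leaf,
    # and with value=None it yields every model that has a 'dtype' field at all.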

View file

@@ -69,13 +69,19 @@ def load_schema_file(path:Path, yaml:Optional[dict] = None) -> SchemaAdapter:
     )
     return schema
 
-def load_namespace_adapter(namespace: Path | NamespaceRepo | Namespaces, path:Optional[Path]=None) -> NamespacesAdapter:
+def load_namespace_adapter(
+        namespace: Path | NamespaceRepo | Namespaces,
+        path:Optional[Path]=None,
+        version: Optional[str]=None
+    ) -> NamespacesAdapter:
     """
     Load all schema referenced by a namespace file
 
     Args:
         namespace (:class:`.Namespace`):
-        path (:class:`pathlib.Path`): Location of the namespace file - all relative paths are interpreted relative to this
+        path (:class:`pathlib.Path`): Optional: Location of the namespace file - all relative paths are interpreted relative to this
+        version (str): Optional: tag or commit to check out if namespace is a :class:`.NamespaceRepo`. If ``None``, use ``HEAD`` if not already checked out, or otherwise use whatever version is already checked out.
 
     Returns:
         :class:`.NamespacesAdapter`
@@ -87,7 +93,7 @@ def load_namespace_adapter(
         path = namespace
         namespaces = _load_namespaces(path)
     elif isinstance(namespace, NamespaceRepo):
-        path = namespace.provide_from_git()
+        path = namespace.provide_from_git(commit=version)
         namespaces = _load_namespaces(namespace)
 
     elif isinstance(namespace, Namespaces):
@@ -116,10 +122,27 @@ def load_namespace_adapter(
     return adapter
 
-def load_nwb_core() -> NamespacesAdapter:
+def load_nwb_core(core_version="2.6.0", hdmf_version="1.5.0") -> NamespacesAdapter:
+    """
+    Convenience function for loading the NWB core schema + hdmf-common as a namespace adapter.
+
+    .. note::
+
+        NWB core schema are implicitly linked to a specific version of hdmf-common by virtue of which version
+        of `hdmf-common-schema` is checked out as a submodule in the repository. We don't
+        attempt to resolve that linkage here because it's not in the schema, but the defaults
+        are for the latest nwb core ( ``'2.6.0'`` ) and its linked hdmf-common version ( ``'1.5.0'`` )
+
+    Args:
+        core_version (str): an entry in :attr:`.NWB_CORE_REPO.versions`
+        hdmf_version (str): an entry in :attr:`.HDMF_COMMON_REPO.versions`
+
+    Returns:
+        :class:`.NamespacesAdapter`
+    """
     # First get hdmf-common:
-    hdmf_schema = load_namespace_adapter(HDMF_COMMON_REPO)
-    schema = load_namespace_adapter(NWB_CORE_REPO)
+    hdmf_schema = load_namespace_adapter(HDMF_COMMON_REPO, version=hdmf_version)
+    schema = load_namespace_adapter(NWB_CORE_REPO, version=core_version)
     schema.imported.append(hdmf_schema)
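
A usage sketch of the new version parameters, matching how the test fixtures
below call it - the io module path is assumed from context:

    from nwb_linkml import io

    # defaults: the latest nwb core and its linked hdmf-common
    core = io.load_nwb_core()
    # or pin both explicitly; the requested tag is checked out from each git repo
    core = io.load_nwb_core(core_version="2.6.0", hdmf_version="1.5.0")
    core.populate_imports()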

View file

@@ -529,12 +529,6 @@ class CompleteModelGroups(HDF5Map):
         unpacked_results, errors, completes = resolve_references(src.result, completed)
         res.update(unpacked_results)
 
-        # # final cleanups
-        # for key, val in res.items():
-        #     # if we're supposed to be a list, but instead we're an array, fix that!
-        #
-        #try:
         instance = src.model(**res)
         return H5ReadResult(
             path=src.path,
@@ -548,19 +542,6 @@ class CompleteModelGroups(HDF5Map):
             applied=src.applied + ['CompleteModelGroups'],
             errors=errors
         )
-        # except ValidationError:
-        #     # didn't get it! try again next time
-        #     return H5ReadResult(
-        #         path=src.path,
-        #         source=src,
-        #         result=src,
-        #         model=src.model,
-        #         completed=True,
-        #         completes=completes,
-        #         neurodata_type=src.neurodata_type,
-        #         namespace=src.namespace,
-        #         applied=src.applied + ['CompleteModelGroups']
-        #     )
 
 class CompleteNWBFile(HDF5Map):
     """
@@ -721,14 +702,6 @@ class ReadQueue(BaseModel):
             self.apply_phase(phase, max_passes=max_passes-1)
 
 def flatten_hdf(h5f: h5py.File | h5py.Group, skip='specifications') -> Dict[str, H5SourceItem]:
     """
     Flatten all child elements of hdf element into a dict of :class:`.H5SourceItem` s keyed by their path

View file

@@ -1,6 +1,6 @@
 import pytest
 import os
-from typing import NamedTuple
+from typing import NamedTuple, Optional
 from linkml_runtime.dumpers import yaml_dumper
@@ -49,9 +49,13 @@ def set_config_vars(tmp_output_dir):
-@pytest.fixture(scope="session")
-def nwb_core_fixture() -> NamespacesAdapter:
-    nwb_core = io.load_nwb_core()
+@pytest.fixture(
+    scope="session",
+    params=[
+        {'core_version': "2.6.0", 'hdmf_version': '1.5.0'}
+    ])
+def nwb_core_fixture(request) -> NamespacesAdapter:
+    nwb_core = io.load_nwb_core(**request.param)
     nwb_core.populate_imports()
     return nwb_core
@@ -63,34 +67,16 @@ def data_dir() -> Path:
 class TestSchemas(NamedTuple):
     core: SchemaDefinition
-    core_path: Path
     imported: SchemaDefinition
-    imported_path: Path
     namespace: SchemaDefinition
-    namespace_path: Path
+    core_path: Optional[Path] = None
+    imported_path: Optional[Path] = None
+    namespace_path: Optional[Path] = None
 
 @pytest.fixture(scope="module")
-def linkml_schema(tmp_output_dir_mod) -> TestSchemas:
-    """
-    A test schema that includes
-
-    - Two schemas, one importing from the other
-    - Arraylike
-    - Required/static "name" field
-    - linkml metadata like tree_root
-    - skipping classes
-    """
-    test_schema_path = tmp_output_dir_mod / 'test_schema'
-    test_schema_path.mkdir()
-    core_path = test_schema_path / 'core.yaml'
-    imported_path = test_schema_path / 'imported.yaml'
-    namespace_path = test_schema_path / 'namespace.yaml'
-
+def linkml_schema_bare() -> TestSchemas:
     schema = TestSchemas(
-        core_path=core_path,
-        imported_path=imported_path,
-        namespace_path=namespace_path,
         core=SchemaDefinition(
             name="core",
             id="core",
@@ -235,6 +221,32 @@ def linkml_schema(tmp_output_dir_mod) -> TestSchemas:
             imports=['core', 'imported']
         )
     )
+    return schema
+
+@pytest.fixture(scope="module")
+def linkml_schema(tmp_output_dir_mod, linkml_schema_bare) -> TestSchemas:
+    """
+    A test schema that includes
+
+    - Two schemas, one importing from the other
+    - Arraylike
+    - Required/static "name" field
+    - linkml metadata like tree_root
+    - skipping classes
+    """
+    schema = linkml_schema_bare
+    test_schema_path = tmp_output_dir_mod / 'test_schema'
+    test_schema_path.mkdir()
+    core_path = test_schema_path / 'core.yaml'
+    imported_path = test_schema_path / 'imported.yaml'
+    namespace_path = test_schema_path / 'namespace.yaml'
+    schema.core_path = core_path
+    schema.imported_path = imported_path
+    schema.namespace_path = namespace_path
+
     yaml_dumper.dump(schema.core, schema.core_path)
     yaml_dumper.dump(schema.imported, schema.imported_path)
     yaml_dumper.dump(schema.namespace, schema.namespace_path)
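
Because the session fixture is now parametrized, every test that depends on
nwb_core_fixture runs once per (core_version, hdmf_version) entry, and more
version pairs can be appended to params later. A generic sketch of the pytest
mechanism, unrelated to the real fixtures:

    import pytest

    @pytest.fixture(scope="session", params=[{'x': 1}, {'x': 2}])
    def cfg(request):
        # request.param is one entry from params for the current run
        return request.param

    def test_cfg(cfg):
        # collected twice: once with cfg == {'x': 1}, once with {'x': 2}
        assert 'x' in cfg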

View file

@@ -1,7 +1,21 @@
+import pdb
 import pytest
 
 from ..fixtures import nwb_core_fixture
-from nwb_schema_language import Dataset, Group, Schema
+from linkml_runtime.linkml_model import SchemaDefinition, ClassDefinition, SlotDefinition, TypeDefinition
+from nwb_schema_language import Dataset, Group, Schema, CompoundDtype
+from nwb_linkml.adapters import BuildResult
+from ..fixtures import linkml_schema_bare
+
+def test_walk(nwb_core_fixture):
+    """
+    Not sure exactly what should be tested here, for now just testing that we get an expected value
+    """
+    everything = nwb_core_fixture.walk(nwb_core_fixture)
+    assert len(list(everything)) == 9959
 
 @pytest.mark.parametrize(
     ['walk_class', 'known_number'],
@@ -17,12 +31,77 @@ def test_walk_types(nwb_core_fixture, walk_class, known_number):
     class_list = list(classes)
     assert len(class_list) == known_number
 
-    # pdb.set_trace()
-
-def test_build_result_add():
-    """
-    Build results can
-    Returns:
-
-    """
+def test_walk_fields(nwb_core_fixture):
+    dtype = nwb_core_fixture.walk_fields(nwb_core_fixture, 'dtype')
+
+def test_walk_field_values(nwb_core_fixture):
+    dtype_models = list(nwb_core_fixture.walk_field_values(nwb_core_fixture, 'dtype', value=None))
+    compounds = [d for d in dtype_models if isinstance(d.dtype, list) and len(d.dtype) > 0 and isinstance(d.dtype[0], CompoundDtype)]
+
+def test_build_result(linkml_schema_bare):
+    """
+    Build results can hold lists of class, slot, and type definitions
+    """
+    schema = linkml_schema_bare
+    sch = schema.core
+    cls = sch.classes['MainTopLevel']
+    slot1 = cls.attributes['name']
+    typ = sch.types['numeric']
+
+    # Build result should hold the results and coerce to list type
+    res = BuildResult(
+        schemas=sch,
+        classes=cls,
+        slots=slot1,
+        types=typ
+    )
+    for field in ('schemas', 'classes', 'slots', 'types'):
+        assert isinstance(getattr(res, field), list)
+        assert len(getattr(res, field)) == 1
+
+@pytest.mark.parametrize(
+    'sch_type',
+    ('schemas', 'classes', 'slots', 'types')
+)
+def test_build_result_add(linkml_schema_bare, sch_type):
+    """
+    Build results can be added together without duplicating
+    """
+    schema = linkml_schema_bare
+    if sch_type == 'schemas':
+        obj = schema.core
+        other_obj = SchemaDefinition(name="othername", id="othername", version="1.0.1")
+    elif sch_type == 'classes':
+        obj = schema.core.classes['MainTopLevel']
+        other_obj = ClassDefinition(name="othername")
+    elif sch_type == 'slots':
+        obj = schema.core.classes['MainTopLevel'].attributes['name']
+        other_obj = SlotDefinition(name="othername", range="string")
+    elif sch_type == 'types':
+        obj = schema.core.types['numeric']
+        other_obj = TypeDefinition(name="othername", typeof="float")
+    else:
+        raise ValueError(f"Don't know how to test type {sch_type}")
+
+    res1 = BuildResult(**{sch_type: [obj]})
+    res2 = BuildResult(**{sch_type: [obj]})
+    assert len(getattr(res1, sch_type)) == 1
+    assert len(getattr(res2, sch_type)) == 1
+
+    assert len(getattr(res1 + res2, sch_type)) == 1
+    assert len(getattr(res2 + res1, sch_type)) == 1
+
+    # and then addition works as normal for not same named items
+    res3 = BuildResult(**{sch_type: [other_obj]})
+    assert len(getattr(res1 + res3, sch_type)) == 2
+    assert len(getattr(res2 + res3, sch_type)) == 2
+
+    res_combined_2 = res1 + res3
+    assert getattr(res_combined_2, sch_type)[-1] is other_obj

File diff suppressed because it is too large

View file

@@ -82,7 +82,7 @@ type Namespace
     name: String!
     fullName: String
     version: String!
-    date: Date
+    date: Datetime
    author: [String]!
     contact: [String]!
     schema: [Schema]

View file

@ -1,7 +1,7 @@
{ {
"comments": { "comments": {
"description": "Auto generated by LinkML jsonld context generator", "description": "Auto generated by LinkML jsonld context generator",
"generation_date": "2023-08-30T20:53:58", "generation_date": "2023-10-09T15:03:06",
"source": "nwb_schema_language.yaml" "source": "nwb_schema_language.yaml"
}, },
"@context": { "@context": {
@ -23,7 +23,7 @@
"@type": "@id" "@type": "@id"
}, },
"date": { "date": {
"@type": "xsd:date", "@type": "xsd:dateTime",
"@id": "schema:dateModified" "@id": "schema:dateModified"
}, },
"default_value": { "default_value": {

View file

@@ -588,7 +588,7 @@
       "domain_of": [
         "Namespace"
       ],
-      "range": "date",
+      "range": "datetime",
       "@type": "SlotDefinition"
     },
     {
@@ -1504,9 +1504,9 @@
   ],
   "metamodel_version": "1.7.0",
   "source_file": "nwb_schema_language.yaml",
-  "source_file_date": "2023-08-30T20:53:55",
-  "source_file_size": 10793,
-  "generation_date": "2023-08-30T20:53:59",
+  "source_file_date": "2023-08-31T15:31:11",
+  "source_file_size": 10797,
+  "generation_date": "2023-10-09T15:03:07",
   "settings": [
     {
       "setting_key": "email",

View file

@@ -408,7 +408,7 @@
     },
     "date": {
       "description": "Date that a namespace was last modified or released",
-      "format": "date",
+      "format": "date-time",
       "type": "string"
     },
     "doc": {

View file

@@ -66,7 +66,7 @@ message Namespace
  string name = 0
  string fullName = 0
  string version = 0
- date date = 0
+ datetime date = 0
  repeated string author = 0
  repeated string contact = 0
  repeated schema schema = 0

View file

@@ -145,7 +145,7 @@ linkml:Sparqlpath xsd:string
     <name> @linkml:String ;
     <full_name> @linkml:String ? ;
     <version> @linkml:String ;
-    schema1:dateModified @linkml:Date ? ;
+    schema1:dateModified @linkml:Datetime ? ;
     schema1:author @linkml:String + ;
     schema1:email @linkml:String + ;
     <schema_> @<Schema> *

View file

@@ -74,7 +74,7 @@ CREATE TABLE "Namespace" (
     name TEXT NOT NULL,
     full_name TEXT,
     version TEXT NOT NULL,
-    date DATE,
+    date DATETIME,
     author TEXT NOT NULL,
     contact TEXT NOT NULL,
     schema_ TEXT,

View file

@@ -1,7 +1,6 @@
 import warnings
 from typing import List, Union
 
 try:
-    pass
     from .datamodel.nwb_schema_pydantic import Namespace, \
         Namespaces, \
         Schema, \

View file

@@ -1,5 +1,5 @@
 # Auto generated from nwb_schema_language.yaml by pythongen.py version: 0.0.1
-# Generation date: 2023-08-30T20:54:02
+# Generation date: 2023-10-09T15:03:09
 # Schema: nwb-schema-language
 #
 # id: https://w3id.org/p2p_ld/nwb-schema-language
@@ -21,8 +21,8 @@ from linkml_runtime.utils.formatutils import camelcase, underscore, sfx
 from linkml_runtime.utils.enumerations import EnumDefinitionImpl
 from rdflib import Namespace, URIRef
 from linkml_runtime.utils.curienamespace import CurieNamespace
-from linkml_runtime.linkml_model.types import Boolean, Date, String
-from linkml_runtime.utils.metamodelcore import Bool, XSDDate
+from linkml_runtime.linkml_model.types import Boolean, Datetime, String
+from linkml_runtime.utils.metamodelcore import Bool, XSDDateTime
 
 metamodel_version = "1.7.0"
 version = None
@@ -58,7 +58,7 @@ class Namespace(YAMLRoot):
     author: Union[str, List[str]] = None
     contact: Union[str, List[str]] = None
     full_name: Optional[str] = None
-    date: Optional[Union[str, XSDDate]] = None
+    date: Optional[Union[str, XSDDateTime]] = None
     schema_: Optional[Union[Union[dict, "Schema"], List[Union[dict, "Schema"]]]] = empty_list()
 
     def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
@@ -92,8 +92,8 @@ class Namespace(YAMLRoot):
         if self.full_name is not None and not isinstance(self.full_name, str):
             self.full_name = str(self.full_name)
 
-        if self.date is not None and not isinstance(self.date, XSDDate):
-            self.date = XSDDate(self.date)
+        if self.date is not None and not isinstance(self.date, XSDDateTime):
+            self.date = XSDDateTime(self.date)
 
         if not isinstance(self.schema_, list):
             self.schema_ = [self.schema_] if self.schema_ is not None else []
@@ -629,7 +629,7 @@ slots.version = Slot(uri=NWB_SCHEMA_LANGUAGE.version, name="version", curie=NWB_
                      pattern=re.compile(r'^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$'))
 
 slots.date = Slot(uri=SCHEMA.dateModified, name="date", curie=SCHEMA.curie('dateModified'),
-                  model_uri=NWB_SCHEMA_LANGUAGE.date, domain=None, range=Optional[Union[str, XSDDate]])
+                  model_uri=NWB_SCHEMA_LANGUAGE.date, domain=None, range=Optional[Union[str, XSDDateTime]])
 
 slots.author = Slot(uri=SCHEMA.author, name="author", curie=SCHEMA.curie('author'),
                     model_uri=NWB_SCHEMA_LANGUAGE.author, domain=None, range=Union[str, List[str]])

View file

@@ -112,8 +112,8 @@ class Namespace(ConfiguredBaseModel):
     full_name: Optional[str] = Field(None, description="""Optional string with extended full name for the namespace.""")
     version: str = Field(...)
     date: Optional[datetime ] = Field(None, description="""Date that a namespace was last modified or released""")
-    author: List[str] | str = Field(default_factory=list, description="""List of strings with the names of the authors of the namespace.""")
-    contact: List[str] | str = Field(default_factory=list, description="""List of strings with the contact information for the authors. Ordering of the contacts should match the ordering of the authors.""")
+    author: List[str] = Field(default_factory=list, description="""List of strings with the names of the authors of the namespace.""")
+    contact: List[str] = Field(default_factory=list, description="""List of strings with the contact information for the authors. Ordering of the contacts should match the ordering of the authors.""")
     schema_: Optional[List[Schema]] = Field(alias="schema", default_factory=list, description="""List of the schema to be included in this namespace.""")
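
With the regenerated annotations, author and contact only accept real lists; a
bare string no longer validates. A minimal sketch of that behavior under
pydantic v2, with the model abbreviated to the fields shown above:

    from typing import List
    from pydantic import BaseModel, Field, ValidationError

    class Namespace(BaseModel):
        version: str
        author: List[str] = Field(default_factory=list)
        contact: List[str] = Field(default_factory=list)

    ns = Namespace(version="2.6.0", author=["someone"], contact=["someone@example.com"])
    try:
        Namespace(version="2.6.0", author="someone")  # a bare string is no longer coerced
    except ValidationError as e:
        print(e)  # author: Input should be a valid list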