commit 615835d57dc1bf705e2ab79531afc81d34f7b8f6 Author: Benedith Mulongo Date: Thu Oct 10 16:37:08 2024 +0200 Improve norduniclient from nordunet diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 0000000..8ab71f2 --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,52 @@ +name: Python CI + +on: + push: + branches: [main] + pull_request: + branches: [main] + +jobs: + test: + runs-on: ubuntu-latest + + services: + neo4j: + image: neo4j:3.5 + ports: + - 7687:7687 # Bolt port + - 7474:7474 # Web + options: > + --env NEO4J_AUTH=neo4j/testpassword + --env NEO4J_dbms_memory_heap_initial__size=512m + --env NEO4J_dbms_memory_heap_max__size=1G + + steps: + - uses: actions/checkout@v3 + + - name: Set up Python + uses: actions/setup-python@v2 + with: + python-version: '3.9' + + - name: Install dependencies + run: | + curl -sSL https://install.python-poetry.org | python3 - + poetry install + + - name: Wait for Neo4j to be ready + run: | + echo "Waiting for Neo4j to start..." + sleep 30 + + - name: Run Tests + env: + NEO4J_HTTP_PORT: 7474 + NEO4J_BOLT_PORT: 7687 + NEO4J_HOSTNAME: localhost + NEO4J_USER: neo4j + NEO4J_PASSWORD: testpassword + run: | + # Ensure we can connect to Neo4j + curl -I http://localhost:7474 + poetry run python -m unittest discover -s tests \ No newline at end of file diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..e42b9d7 --- /dev/null +++ b/.gitignore @@ -0,0 +1,97 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +env/ +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +*.egg-info/ +.installed.cfg +*.egg + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +.nox +/.nox +.mypy_cache +htmlcov/ +.tox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*,cover +.hypothesis/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# IPython Notebook +.ipynb_checkpoints + +# pyenv +.python-version + +# celery beat schedule file +celerybeat-schedule + +# dotenv +.env + +# virtualenv +venv/ +ENV/ +env + +# Spyder project settings +.spyderproject + +# Rope project settings +.ropeproject + +# PyCharm project settings +.idea + diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml new file mode 100644 index 0000000..085610e --- /dev/null +++ b/.gitlab-ci.yml @@ -0,0 +1,33 @@ +--- +stages: + - test + - deploy + +image: python:3 + +variables: + NEO4J_AUTH: neo4j/testing + +services: + - neo4j:4.4 + + +before_script: + - python -V + +test: + stage: test + tags: + - docker + script: + - python setup.py testing + - python setup.py test + +deploy: + stage: deploy + only: + - tags + script: + - pip install -U twine build + - python -m build + - twine upload dist/* --verbose diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..6b0b127 --- /dev/null +++ b/LICENSE @@ -0,0 +1,203 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ diff --git a/README.md b/README.md new file mode 100644 index 0000000..ad5e82a --- /dev/null +++ b/README.md @@ -0,0 +1,116 @@ +# python-norduniclient +[![PyPI](https://img.shields.io/pypi/v/norduniclient.svg)](https://pypi.python.org/pypi/norduniclient) + + +Neo4j database client for NORDUnet network inventory + +## Setup + +``` +poetry shell +poetry install +``` + +## Running tests + +Add the following environment variables: + +```env +NEO4J_HTTP_PORT=7474 +NEO4J_BOLT_PORT=7687 +NEO4J_HOSTNAME=xx +NEO4J_USER=xx +NEO4J_PASSWORD=xx +``` + +and run + +```bash +poetry run python -m unittest discover +``` + +or save the environment variables in a local file `.env` and run it with [dotenvx](https://dotenvx.com/) as follows: + + +```bash +dotenvx run -- poetry run python -m unittest discover +``` + +or + +```bash +nox -rs tests +``` + +```bash +nox -rs tests_dotenv +``` + + + +## Installation + +```bash +pip install norduniclient +``` + + +python3 -m pip install --index-url https://platform.sunet.se/api/packages/benedith/pypi/simple/ --extra-index-url https://pypi.org/simple/ norduniclient + +python3 -m pip install -i https://test.pypi.org/simple/ --extra-index-url https://pypi.org/simple/ jolieprinter + +## Usage + +```python +import norduniclient as nc + +NODE_META_TYPE_CHOICES = zip(nc.META_TYPES, nc.META_TYPES) + +print("nc.META_TYPES=", nc.META_TYPES) +``` + + +### Poetry guide + +``` +poetry run python [operation] +``` + +### Add depenency + +#### Add a new lib +```bash +poetry add +``` +#### Remove a lib +```bash +poetry remove +``` + + +#### Get venv path +```bash +poetry run which python +``` + +#### Show dependencies +```bash +poetry show +``` +```bash +poetry run pip list +``` + +#### List configuratiom +```bash +poetry config --list +``` + +### Publish + +```bash +poetry config repositories.pypi https://upload.pypi.org/legacy/ +poetry config pypi-token.pypi [token] +poetry publish --build --repository pypi +poetry publish --build --repository testpypi +``` \ No newline at end of file diff --git a/norduniclient-package.md b/norduniclient-package.md new file mode 100644 index 0000000..21dc494 --- /dev/null +++ b/norduniclient-package.md @@ -0,0 +1,44 @@ +# python-norduniclient +[![PyPI](https://img.shields.io/pypi/v/norduniclient.svg)](https://pypi.python.org/pypi/norduniclient) + + +Neo4j database client for NORDUnet network inventory + +## Setup + +``` +poetry shell +poetry install +``` + +## Running tests + +Add the following environment variables: + +```bash +NEO4J_HTTP_PORT=7474 +NEO4J_BOLT_PORT=7687 +NEO4J_HOSTNAME=xx +NEO4J_USER=xx +NEO4J_PASSWORD=xx +``` + +``` +poetry run python -m unittest discover +``` + +## Installation + +```bash +pip install norduniclient +``` + +## Usage + +```python +import norduniclient as nc + +NODE_META_TYPE_CHOICES = zip(nc.META_TYPES, nc.META_TYPES) + +print("nc.META_TYPES=", nc.META_TYPES) +``` diff --git a/norduniclient/__init__.py b/norduniclient/__init__.py new file mode 100644 index 0000000..37aefdc --- /dev/null +++ b/norduniclient/__init__.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- + +from __future__ import absolute_import +from norduniclient.core import * + +__author__ = 'lundberg' + + +# Init as singleton for easy use in Django +# You can use it like this: +# from norduniclient import graphdb as db +# get_node(db.manager, 'node_id*) +graphdb = GraphDB.get_instance() + +neo4jdb = graphdb.manager # Works as the old neo4jdb diff --git a/norduniclient/contextmanager.py b/norduniclient/contextmanager.py new file mode 100644 index 
0000000..458c838 --- /dev/null +++ b/norduniclient/contextmanager.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- + +from __future__ import absolute_import + +from contextlib import contextmanager +from norduniclient.core import get_db_driver + +__author__ = 'lundberg' + + +class Neo4jDBSessionManager: + + """ + Every new connection is a transaction. To minimize new connection overhead for many reads we try to reuse a single + connection. If this seem like a bad idea some kind of connection pool might work better. + + Neo4jDBSessionManager.session() + + Neo4jDBSessionManager.transaction() + """ + + def __init__(self, uri, username=None, password=None, encrypted=True, max_pool_size=50): + self.uri = uri + self.driver = get_db_driver(uri, username, password, encrypted, max_pool_size) + + @contextmanager + def _session(self): + session = self.driver.session() + try: + yield session + except Exception as e: + raise e + finally: + try: + session.close() + except Exception: + pass + session = property(_session) + + @contextmanager + def _transaction(self): + session = self.driver.session() + transaction = session.begin_transaction() + try: + yield transaction + except Exception as e: + transaction.success = False + raise e + else: + transaction.success = True + finally: + try: + session.close() + except Exception: + pass + transaction = property(_transaction) diff --git a/norduniclient/core.py b/norduniclient/core.py new file mode 100644 index 0000000..2e7ff5f --- /dev/null +++ b/norduniclient/core.py @@ -0,0 +1,713 @@ +# -*- coding: utf-8 -*- +# +# core.py +# +# Copyright 2016 Johan Lundberg +# + +# This started as an extension to the Neo4j REST client made by Versae, continued +# as an extension for the official Neo4j python bindings when they were released +# (Neo4j 1.5, python-embedded). +# +# After the release of neo4j 3.0 and the bolt protocol we replaced neo4jdb-python with +# the official Neo4j driver. +# +# The goal is to make it easier to add and retrieve data from a Neo4j database +# according to the NORDUnet Network Inventory data model. 
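+# A minimal usage sketch (illustrative only; `manager` is assumed to be an
+# initialized Neo4jDBSessionManager, and the names and handle id below are hypothetical):
+#
+#     node = create_node(manager, name='sw1.example.net', meta_type_label='Physical',
+#                        type_label='Switch', handle_id='101')
+#     same_node = get_node(manager, handle_id='101')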
+# +# More information about NORDUnet Network Inventory: +# https://portal.nordu.net/display/NI/ + +from __future__ import absolute_import + +from neo4j import GraphDatabase +from neo4j.exceptions import DatabaseError, ClientError +from neo4j.api import basic_auth +from norduniclient import exceptions +from norduniclient import models + +import logging +logger = logging.getLogger(__name__) + +# Load Django settings +NEO4J_URI, NEO4J_USERNAME, NEO4J_PASSWORD = None, None, None +MAX_POOL_SIZE = 50 +ENCRYPTED = False +try: + from django.conf import settings as django_settings + try: + # Mandatory Django settings for quick init + NEO4J_URI = django_settings.NEO4J_RESOURCE_URI + NEO4J_USERNAME = django_settings.NEO4J_USERNAME + NEO4J_PASSWORD = django_settings.NEO4J_PASSWORD + except AttributeError: + pass + # Optional Django settings for quick init + try: + MAX_POOL_SIZE = django_settings.NEO4J_MAX_POOL_SIZE + except AttributeError: + pass + try: + ENCRYPTED = django_settings.NEO4J_ENCRYPTED + except AttributeError: + pass +except ImportError: + logger.info('Starting up without a Django environment.') + logger.info('Initial: norduniclient.neo4jdb == None.') + logger.info('Use norduniclient.init_db to open a database connection.') + + +META_TYPES = ['Physical', 'Logical', 'Relation', 'Location'] + + +class GraphDB(object): + + _instance = None + _manager = None + + @classmethod + def get_instance(cls): + if cls._instance is None: + cls._instance = cls() + return cls._instance + + def __init__(self): + self._manager = self.manager + + @property + def manager(self): + if self._manager is None: + try: + self._manager = init_db() + except Exception as e: + logger.error('Could not create manager: {}'.format(e)) + self._manager = None + return self._manager + + @manager.setter + def manager(self, manager): + self._manager = manager + + +def init_db(uri=NEO4J_URI, username=NEO4J_USERNAME, password=NEO4J_PASSWORD, encrypted=ENCRYPTED, + max_pool_size=MAX_POOL_SIZE): + if uri: + try: + from norduniclient.contextmanager import Neo4jDBSessionManager + manager = Neo4jDBSessionManager(uri=uri, username=username, password=password, encrypted=encrypted, + max_pool_size=max_pool_size) + try: + with manager.session as s: + s.run('CREATE CONSTRAINT ON (n:Node) ASSERT n.handle_id IS UNIQUE') + except ClientError as e: + if e.title == 'EquivalentSchemaRuleAlreadyExists': + logger.info('Unique constraint already exists') + else: + raise e + except Exception as e: + logger.error('Could not create constraints for Neo4j database: {!s}'.format(uri)) + raise e + try: + create_index(manager, 'name') + except ClientError as e: + if e.title == 'EquivalentSchemaRuleAlreadyExists': + logger.info('Index already exists') + else: + logger.error('Could not create index for Neo4j database: {!s}'.format(uri)) + raise e + except Exception as e: + logger.error('Could not create index for Neo4j database: {!s}'.format(uri)) + raise e + return manager + except DatabaseError as e: + logger.warning('Could not connect to Neo4j database: {!s}'.format(uri)) + raise e + + +def get_db_driver(uri, username=None, password=None, encrypted=True, max_pool_size=50, trust=0): + """ + :param uri: Bolt uri + :type uri: str + :param username: Neo4j username + :type username: str + :param password: Neo4j password + :type password: str + :param encrypted: Use TLS + :type encrypted: Boolean + :param max_pool_size: Maximum number of idle sessions + :type max_pool_size: Integer + :param trust: Trust cert on first use (0) or do not accept unknown cert (1) + 
:type trust: Integer + :return: Neo4j driver + :rtype: neo4j.session.Driver + """ + + return GraphDatabase.driver(uri, auth=basic_auth(username, password), encrypted=encrypted, + max_connection_pool_size=max_pool_size, trust='TRUST_ALL_CERTIFICATES') + + +def query_to_dict(manager, query, **kwargs): + d = {} + with manager.session as s: + result = s.run(query, kwargs) + for record in result: + for key, value in record.items(): + d[key] = value + return d + + +def query_to_list(manager, query, **kwargs): + out = [] + with manager.session as s: + result = s.run(query, kwargs) + for record in result: + d = {} + for key, value in record.items(): + d[key] = value + out.append(d) + return out + + +def query_to_iterator(manager, query, **kwargs): + with manager.session as s: + result = s.run(query, kwargs) + for record in result: + d = {} + for key, value in record.items(): + d[key] = value + yield d + + +def neo4j_entity_to_dict(node): + return {k: v for k, v in node.items()} + + +def create_node(manager, name, meta_type_label, type_label, handle_id): + """ + Creates a node with the mandatory attributes name and handle_id also sets type label. + + :param manager: Manager to handle sessions and transactions + :param name: Node name + :param meta_type_label: Node meta type + :param type_label: Node label + :param handle_id: Unique id + + :type manager: norduniclient.contextmanager.Neo4jDBSessionManager + :type name: str|unicode + :type meta_type_label: str|unicode + :type type_label: str|unicode + :type handle_id: str|unicode + + :rtype: dict + """ + if meta_type_label not in META_TYPES: + raise exceptions.MetaLabelNamingError(meta_type_label) + q = """ + CREATE (n:Node:%s:%s { name: $name, handle_id: $handle_id}) + RETURN n + """ % (meta_type_label, type_label) + with manager.session as s: + return neo4j_entity_to_dict(s.run(q, {'name': name, 'handle_id': handle_id}).single()['n']) + + +def get_node(manager, handle_id): + """ + :param manager: Manager to handle sessions and transactions + :param handle_id: Unique id + + :type manager: norduniclient.contextmanager.Neo4jDBSessionManager + :type handle_id: str|unicode + + :rtype: dict|neo4j.types.Node + """ + q = 'MATCH (n:Node { handle_id: $handle_id }) RETURN n' + + with manager.session as s: + result = s.run(q, {'handle_id': handle_id}).single() + if result: + return neo4j_entity_to_dict(result['n']) + raise exceptions.NodeNotFound(manager, handle_id) + + +def get_node_bundle(manager, handle_id=None, node=None): + """ + :param manager: Neo4jDBSessionManager + :param handle_id: Unique id + :type handle_id: str|unicode + :param node: Node object + :type node: neo4j.types.Node + :return: dict + """ + if not node: + q = 'MATCH (n:Node { handle_id: $handle_id }) RETURN n' + with manager.session as s: + result = s.run(q, {'handle_id': handle_id}).single() + if not result: + raise exceptions.NodeNotFound(manager, handle_id) + node = result['n'] + d = { + 'data': neo4j_entity_to_dict(node) + } + labels = list(node.labels) + labels.remove('Node') # All nodes have this label for indexing + for label in labels: + if label in META_TYPES: + d['meta_type'] = label + labels.remove(label) + d['labels'] = labels + return d + + +def delete_node(manager, handle_id): + """ + Deletes the node and all its relationships. 
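+    Example (an illustrative sketch, not from the original source; assumes an
+    initialized manager and a hypothetical handle id):
+
+        >>> delete_node(manager, '42')
+        True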
+ + :param manager: Neo4jDBSessionManager + :param handle_id: Unique id + + :rtype: bool + """ + q = """ + MATCH (n:Node {handle_id: $handle_id}) + OPTIONAL MATCH (n)-[r]-() + DELETE n,r + """ + with manager.session as s: + s.run(q, {'handle_id': handle_id}) + return True + + +def get_relationship(manager, relationship_id): + """ + :param manager: Manager to handle sessions and transactions + :param relationship_id: Unique id + + :type manager: norduniclient.contextmanager.Neo4jDBSessionManager + :type relationship_id: int + + :rtype int|neo4j.types.Relationship + """ + q = """ + MATCH ()-[r]->() + WHERE ID(r) = $relationship_id + RETURN r + """ + with manager.session as s: + record = s.run(q, {'relationship_id': int(relationship_id)}).single() + if record: + return neo4j_entity_to_dict(record['r']) + raise exceptions.RelationshipNotFound(manager, int(relationship_id)) + + +def get_relationship_bundle(manager, relationship_id=None): + """ + :param manager: Neo4jDBSessionManager + :param relationship_id: Internal Neo4j id + + :type relationship_id: int + + :rtype: dictionary + """ + q = """ + MATCH (start)-[r]->(end) + WHERE ID(r) = $relationship_id + RETURN start, r, end + """ + + with manager.session as s: + record = s.run(q, {'relationship_id': int(relationship_id)}).single() + if record is None: + raise exceptions.RelationshipNotFound(manager, int(relationship_id)) + + return { + 'type': record['r'].type, + 'id': int(relationship_id), + 'data': neo4j_entity_to_dict(record['r']), + 'start': neo4j_entity_to_dict(record['start']), + 'end': neo4j_entity_to_dict(record['end']), + } + + +def delete_relationship(manager, relationship_id): + """ + Deletes the relationship. + + :param manager: Neo4jDBSessionManager + :param relationship_id: Internal Neo4j relationship id + :return: bool + """ + q = """ + MATCH ()-[r]->() + WHERE ID(r) = $relationship_id + DELETE r + """ + with manager.session as s: + s.run(q, {'relationship_id': int(relationship_id)}) + return True + + +def get_node_meta_type(manager, handle_id): + """ + Returns the meta type of the supplied node as a string. + + :param manager: Neo4jDBSessionManager + :param handle_id: Unique id + :return: string + """ + node = get_node_bundle(manager=manager, handle_id=handle_id) + if 'meta_type' not in node: + raise exceptions.NoMetaLabelFound(handle_id) + return node['meta_type'] + + +# TODO: Try out elasticsearch +def get_nodes_by_value(manager, value, prop, node_type='Node'): + """ + Traverses all nodes or nodes of specified label and compares the property/properties of the node + with the supplied string. + + :param manager: Neo4jDBSessionManager + :param value: Value to search for + :param prop: Which property to look for value in + :param node_type: + + :type value: str|list|bool|int + :type prop: str + :type node_type: str + :return: dicts + """ + q = """ + MATCH (n:{label}) + WHERE n.{prop} = $value + RETURN distinct n + """.format(label=node_type, prop=prop) + + with manager.session as s: + for result in s.run(q, {'value': value}): + yield neo4j_entity_to_dict(result['n']) + + +def get_node_by_type(manager, node_type): + q = """ + MATCH (n:{label}) + RETURN distinct n + """.format(label=node_type) + with manager.session as s: + for result in s.run(q): + yield neo4j_entity_to_dict(result['n']) + + +def search_nodes_by_value(manager, value, prop=None, node_type='Node'): + """ + Traverses all nodes or nodes of specified label and fuzzy compares the property/properties of the node + with the supplied string. 
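+    Example (illustrative sketch; the manager and the search value are assumptions):
+
+        >>> for node in search_nodes_by_value(manager, 'lund', prop='name'):
+        ...     print(node['name'])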
+ + :param manager: Neo4jDBSessionManager + :param value: Value to search for + :param prop: Which property to look for value in + :param node_type: + + :type value: str + :type prop: str + :type node_type: str + :return: dicts + """ + if prop: + q = """ + MATCH (n:{label}) + WHERE n.{prop} =~ "(?i).*{value}.*" OR any(x IN n.{prop} WHERE x =~ "(?i).*{value}.*") + RETURN distinct n + """.format(label=node_type, prop=prop, value=value) + else: + q = """ + MATCH (n:{label}) + WITH n, keys(n) as props + WHERE any(prop in props WHERE n[prop] =~ "(?i).*{value}.*") OR + any(prop in props WHERE any(x IN n[prop] WHERE x =~ "(?i).*{value}.*")) + RETURN distinct n + """.format(label=node_type, value=value) + + with manager.session as s: + for result in s.run(q): + yield result['n'] + + +# TODO: Try out elasticsearch +def get_nodes_by_type(manager, node_type): + q = """ + MATCH (n:{label}) + RETURN n + """.format(label=node_type) + with manager.session as s: + for result in s.run(q): + yield result['n'] + + +# TODO: Try out elasticsearch +def get_nodes_by_name(manager, name): + q = """ + MATCH (n:Node {name: $name}) + RETURN n + """ + with manager.session as s: + for result in s.run(q, {'name': name}): + yield result['n'] + + +def create_index(manager, prop, node_type='Node'): + """ + :param manager: Neo4jDBSessionManager + :param prop: Property to index + :param node_type: Label to create index on + + :type manager: Neo4jDBSessionManager + :type prop: str + :type node_type: str + """ + with manager.session as s: + s.run('CREATE INDEX ON :{node_type}({prop})'.format(node_type=node_type, prop=prop)) + + +def get_indexed_node(manager, prop, value, node_type='Node', lookup_func='CONTAINS'): + """ + :param manager: Neo4jDBSessionManager + :param prop: Indexed property + :param value: Indexed value + :param node_type: Label used for index + :param lookup_func: STARTS WITH | CONTAINS | ENDS WITH + + :type manager: Neo4jDBSessionManager + :type prop: str + :type value: str + :type node_type: str + :type lookup_func: str + + :return: Dict or Node object + :rtype: dict|Node + """ + q = """ + MATCH (n:{label}) + WHERE toLower(n.{prop}) {lookup_func} toLower($value) + RETURN n + """.format(label=node_type, prop=prop, lookup_func=lookup_func) + with manager.session as s: + for result in s.run(q, {'value': value}): + yield neo4j_entity_to_dict(result['n']) + + +def get_unique_node_by_name(manager, node_name, node_type): + """ + Returns the node if the node is unique for name and type or None. 
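+    Example (illustrative sketch; the node name and the 'Router' label are hypothetical):
+
+        >>> router = get_unique_node_by_name(manager, 'r1.example.net', 'Router')
+        >>> router.handle_id if router else None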
+ + :param manager: Neo4jDBSessionManager + :param node_name: string + :param node_type: str|unicode + :return: norduniclient node model or None + """ + q = """ + MATCH (n:Node { name: $name }) + WHERE $label IN labels(n) + RETURN n.handle_id as handle_id + """ + + with manager.session as s: + result = list(s.run(q, {'name': node_name, 'label': node_type})) + + if result: + if len(result) == 1: + return get_node_model(manager, result[0]['handle_id']) + raise exceptions.MultipleNodesReturned(node_name, node_type) + return None + + +def _create_relationship(manager, handle_id, other_handle_id, rel_type): + """ + :param manager: Context manager to handle transactions + :param handle_id: Node handle id + :param other_handle_id: Other node handle id + :param rel_type: Relationship type + + :type manager: Neo4jDBSessionManager + :type handle_id: str|unicode + :type other_handle_id: str|unicode + :type rel_type: str|unicode + + :rtype: int relationship_id + """ + + q = """ + MATCH (a:Node {handle_id: $start}),(b:Node {handle_id: $end}) + CREATE (a)-[r:%s]->(b) + RETURN r + """ % rel_type + + with manager.session as s: + return s.run(q, {'start': handle_id, 'end': other_handle_id}).single()['r'].id + + +def create_location_relationship(manager, location_handle_id, other_handle_id, rel_type): + """ + Makes relationship between the two nodes and returns the relationship. + If a relationship is not possible NoRelationshipPossible exception is + raised. + """ + other_meta_type = get_node_meta_type(manager, other_handle_id) + if other_meta_type == 'Location' and rel_type == 'Has': + return _create_relationship(manager, location_handle_id, other_handle_id, rel_type) + raise exceptions.NoRelationshipPossible(location_handle_id, 'Location', other_handle_id, other_meta_type, rel_type) + + +def create_logical_relationship(manager, logical_handle_id, other_handle_id, rel_type): + """ + Makes relationship between the two nodes and returns the relationship. + If a relationship is not possible NoRelationshipPossible exception is + raised. + """ + other_meta_type = get_node_meta_type(manager, other_handle_id) + if rel_type == 'Depends_on': + if other_meta_type == 'Logical' or other_meta_type == 'Physical': + return _create_relationship(manager, logical_handle_id, other_handle_id, rel_type) + elif rel_type == 'Part_of': + if other_meta_type == 'Physical': + return _create_relationship(manager, logical_handle_id, other_handle_id, rel_type) + raise exceptions.NoRelationshipPossible(logical_handle_id, 'Logical', other_handle_id, other_meta_type, rel_type) + + +def create_relation_relationship(manager, relation_handle_id, other_handle_id, rel_type): + """ + Makes relationship between the two nodes and returns the relationship. + If a relationship is not possible NoRelationshipPossible exception is + raised. 
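+    Example (illustrative sketch; the handle ids are hypothetical, where '201' is assumed
+    to be a Relation node such as a customer and '301' a Logical node such as a service):
+
+        >>> rel_id = create_relation_relationship(manager, '201', '301', 'Uses')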
+ """ + other_meta_type = get_node_meta_type(manager, other_handle_id) + if other_meta_type == 'Logical': + if rel_type in ['Uses', 'Provides']: + return _create_relationship(manager, relation_handle_id, other_handle_id, rel_type) + elif other_meta_type == 'Location' and rel_type == 'Responsible_for': + return _create_relationship(manager, relation_handle_id, other_handle_id, rel_type) + elif other_meta_type == 'Physical': + if rel_type in ['Owns', 'Provides']: + return _create_relationship(manager, relation_handle_id, other_handle_id, rel_type) + raise exceptions.NoRelationshipPossible(relation_handle_id, 'Relation', other_handle_id, other_meta_type, rel_type) + + +def create_physical_relationship(manager, physical_handle_id, other_handle_id, rel_type): + """ + Makes relationship between the two nodes and returns the relationship. + If a relationship is not possible NoRelationshipPossible exception is + raised. + """ + other_meta_type = get_node_meta_type(manager, other_handle_id) + if other_meta_type == 'Physical': + if rel_type == 'Has' or rel_type == 'Connected_to': + return _create_relationship(manager, physical_handle_id, other_handle_id, rel_type) + elif other_meta_type == 'Location' and rel_type == 'Located_in': + return _create_relationship(manager, physical_handle_id, other_handle_id, rel_type) + raise exceptions.NoRelationshipPossible(physical_handle_id, 'Physical', other_handle_id, other_meta_type, rel_type) + + +def create_relationship(manager, handle_id, other_handle_id, rel_type): + """ + Makes a relationship from node to other_node depending on which + meta_type the nodes are. Returns the relationship or raises + NoRelationshipPossible exception. + """ + meta_type = get_node_meta_type(manager, handle_id) + if meta_type == 'Location': + return create_location_relationship(manager, handle_id, other_handle_id, rel_type) + elif meta_type == 'Logical': + return create_logical_relationship(manager, handle_id, other_handle_id, rel_type) + elif meta_type == 'Relation': + return create_relation_relationship(manager, handle_id, other_handle_id, rel_type) + elif meta_type == 'Physical': + return create_physical_relationship(manager, handle_id, other_handle_id, rel_type) + other_meta_type = get_node_meta_type(manager, other_handle_id) + raise exceptions.NoRelationshipPossible(handle_id, meta_type, other_handle_id, other_meta_type, rel_type) + + +def get_relationships(manager, handle_id1, handle_id2, rel_type=None): + """ + Takes a start and an end node with an optional relationship + type. + Returns the relationships between the nodes or an empty list. 
+ """ + if rel_type: + q = """ + MATCH (a:Node {{handle_id: $handle_id1}})-[r:{rel_type}]-(b:Node {{handle_id: $handle_id2}}) + RETURN collect(r) as relationships + """.format(rel_type=rel_type) + else: + q = """ + MATCH (a:Node {handle_id: $handle_id1})-[r]-(b:Node {handle_id: $handle_id2}) + RETURN collect(r) as relationships + """ + with manager.session as s: + return s.run(q, {'handle_id1': handle_id1, 'handle_id2': handle_id2}).single()['relationships'] + + +def set_node_properties(manager, handle_id, new_properties): + new_properties['handle_id'] = handle_id # Make sure the handle_id can't be changed + + q = """ + MATCH (n:Node {handle_id: $props.handle_id}) + SET n = $props + RETURN n + """ + with manager.session as s: + return neo4j_entity_to_dict(s.run(q, {'handle_id': handle_id, 'props': new_properties}).single()['n']) + + +def set_relationship_properties(manager, relationship_id, new_properties): + + q = """ + MATCH ()-[r]->() + WHERE ID(r) = $relationship_id + SET r = $props + RETURN r + """ + with manager.session as s: + return s.run(q, {'relationship_id': int(relationship_id), 'props': new_properties}).single() + + +def get_node_model(manager, handle_id=None, node=None): + """ + :param manager: Context manager to handle transactions + :type manager: Neo4jDBSessionManager + :param handle_id: Nodes handle id + :type handle_id: str|unicode + :param node: Node object + :type node: neo4j.types.Node + :return: Node model + :rtype: models.BaseNodeModel or sub class of models.BaseNodeModel + """ + bundle = get_node_bundle(manager, handle_id, node) + for label in bundle.get('labels'): + try: + classname = '{meta_type}{base}Model'.format(meta_type=bundle.get('meta_type'), base=label).replace('_', '') + return getattr(models, classname)(manager).load(bundle) + except AttributeError: + pass + for label in bundle.get('labels'): + try: + classname = '{base}Model'.format(base=label).replace('_', '') + return getattr(models, classname)(manager).load(bundle) + except AttributeError: + pass + try: + classname = '{base}Model'.format(base=bundle.get('meta_type')) + return getattr(models, classname)(manager).load(bundle) + except AttributeError: + return models.BaseNodeModel(manager).load(bundle) + + +def get_relationship_model(manager, relationship_id): + """ + :param manager: Context manager to handle transactions + :type manager: Neo4jDBSessionManager + :param relationship_id: Internal Neo4j relationship id + :type relationship_id: int + :return: Relationship model + :rtype: models.BaseRelationshipModel + """ + bundle = get_relationship_bundle(manager, relationship_id) + return models.BaseRelationshipModel(manager).load(bundle) diff --git a/norduniclient/exceptions.py b/norduniclient/exceptions.py new file mode 100644 index 0000000..d52b9b8 --- /dev/null +++ b/norduniclient/exceptions.py @@ -0,0 +1,116 @@ +# -*- coding: utf-8 -*- +""" +Created on Thu Oct 13 16:36:31 2011 + +@author: lundberg +""" + +from __future__ import absolute_import + + +class NoRelationshipPossible(Exception): + """ + Exception that explains why the nodes relationship was not possible. 
+ """ + def __init__(self, handle_id1, meta_type1, handle_id2, meta_type2, relationship_type): + self.handle_id1 = handle_id1 + self.handle_id2 = handle_id2 + self.meta_type1 = meta_type1 + self.meta_type2 = meta_type2 + self.relationship_type = relationship_type + + def __str__(self): + node1_str = '{meta_type} Node ({handle_id})'.format(meta_type=self.meta_type1, handle_id=self.handle_id1) + node2_str = '{meta_type} Node ({handle_id})'.format(meta_type=self.meta_type2, handle_id=self.handle_id2) + return '%s %s %s is not possible.' % (node1_str, self.relationship_type, node2_str) + + +class MetaLabelNamingError(Exception): + """ + Exception that explains that meta labels must have special names defined + in create_node(). + """ + def __init__(self, name): + self.error = 'A meta label can not be named {name}.'.format(name=name) + + def __str__(self): + return self.error + + +class NoMetaLabelFound(Exception): + """ + All nodes need a meta type to function correctly in the NOCLook model. This + exception should be raised if the nodes meta node can't be found. + """ + def __init__(self, handle_id): + self.handle_id = handle_id + + def __str__(self): + return 'Node with handle_id {handle_id} has no meta label.'.format(handle_id=self.handle_id) + + +class UniqueNodeError(Exception): + """ + Should be raised when the user tries to create a new node that should be + unique for that node_name and node_type. + """ + def __init__(self, name, handle_id, node_type): + self.name = name + self.handle_id = handle_id + self.node_type = node_type + + def __str__(self): + return 'A node named {name} with node type {type} already exists. Handle ID: {id}'.format(name=self.name, + type=self.node_type, + id=self.handle_id) + + +class MultipleNodesReturned(Exception): + """ + If a user requests an unique node, by name and type, and multiple nodes are returned + this exception should be raised. + """ + def __init__(self, node_name, node_type): + self.node_name = node_name + self.node_type = node_type + + def __str__(self): + return 'Multiple nodes of name %s and type %s was returned.' % (self.node_name, + self.node_type) + + +class BadProperties(Exception): + """ + If a user tries to set node or relationship properties that are not Numeric values, + String values or Boolean values. + """ + def __init__(self, properties): + self.properties = properties + + def __str__(self): + return '''Tried to set {properties} as properties. +Only numeric values, string values or boolean values are allowed'''.format(properties=self.properties) + + +class NodeNotFound(Exception): + """ + The provided handle_id did not match any node in the graph database. + """ + def __init__(self, manager, handle_id): + self.message = '{handle_id} did not match a node in database at {db}.'.format(handle_id=handle_id, + db=manager.uri) + + def __str__(self): + return self.message + + +class RelationshipNotFound(Exception): + """ + The provided handle_id did not match any node in the graph database. 
+ """ + def __init__(self, manager, relationship_id): + self.message = '{relationship_id} did not match a relationship in database at {db}.'.format( + relationship_id=relationship_id, db=manager.uri) + + def __str__(self): + return self.message diff --git a/norduniclient/helpers.py b/norduniclient/helpers.py new file mode 100644 index 0000000..94a7aa6 --- /dev/null +++ b/norduniclient/helpers.py @@ -0,0 +1,43 @@ +# -*- coding: utf-8 -*- +from __future__ import absolute_import + +__author__ = 'lundberg' + + +def update_item_properties(item_properties, new_properties): + for key, value in new_properties.items(): + if value or value == 0: + item_properties[key] = value + elif key in item_properties.keys(): + del item_properties[key] + return item_properties + + +# TODO: Does this helper make any sense? +def merge_properties(item_properties, prop_name, merge_value): + """ + Tries to figure out which type of property value that should be merged and + invoke the right function. + Returns new properties if the merge was successful otherwise False. + """ + existing_value = item_properties.get(prop_name, None) + if not existing_value: # A node without existing values for the property + item_properties[prop_name] = merge_value + else: + if type(merge_value) is int or type(merge_value) is str: + item_properties[prop_name] = existing_value + merge_value + elif type(merge_value) is list: + item_properties[prop_name] = merge_list(existing_value, merge_value) + else: + return False + return item_properties + + +def merge_list(existing_value, new_value): + """ + Takes the name of a property, a list of new property values and the existing + node values. + Returns the merged properties. + """ + new_set = set(existing_value + new_value) + return list(new_set) diff --git a/norduniclient/models.py b/norduniclient/models.py new file mode 100644 index 0000000..a4c267f --- /dev/null +++ b/norduniclient/models.py @@ -0,0 +1,889 @@ +# -*- coding: utf-8 -*- + +from functools import total_ordering +from collections import defaultdict +try: + # Python 2 + import core +except ImportError: # Fix circular import in python 2 vs python 3 + # Python 3 + from norduniclient import core + +__author__ = 'lundberg' + + +@total_ordering +class BaseRelationshipModel(object): + + def __init__(self, manager): + self.manager = manager + self.id = None + self.type = None + self.data = None + self.start = None + self.end = None + + def __str__(self): + return u'({start})-[{id}:{type}{data}]->({end}) in database {db}.'.format( + start=self.start['handle_id'], type=self.type, id=self.id, data=self.data, end=self.end['handle_id'], + db=self.manager.uri + ) + + def __eq__(self, other): + return self.id == other.id + + def __lt__(self, other): + return self.id < other.id + + def __repr__(self): + return u'<{c} id:{id} in {db}>'.format(c=self.__class__.__name__, id=self.id, db=self.manager.uri) + + def load(self, relationship_bundle): + self.id = relationship_bundle.get('id') + self.type = relationship_bundle.get('type') + self.data = relationship_bundle.get('data', {}) + self.start = relationship_bundle.get('start') + self.end = relationship_bundle.get('end') + return self + + def delete(self): + core.delete_relationship(self.manager, self.id) + + +@total_ordering +class BaseNodeModel(object): + + def __init__(self, manager): + self.manager = manager + self.meta_type = None + self.labels = None + self.data = None + + def __str__(self): + labels = ':'.join(self.labels) + return u'(node:{meta_type}:{labels} {data}) in database {db}.'.format( 
+ meta_type=self.meta_type, labels=labels, data=self.data, db=self.manager.uri + ) + + def __eq__(self, other): + return self.handle_id == other.handle_id + + def __lt__(self, other): + return self.handle_id < other.handle_id + + def __repr__(self): + return u'<{c} handle_id:{handle_id} in {db}>'.format(c=self.__class__.__name__, handle_id=self.handle_id, + db=self.manager.uri) + + def _get_handle_id(self): + return self.data.get('handle_id') + handle_id = property(_get_handle_id) + + def _incoming(self): + q = """ + MATCH (n:Node {handle_id: $handle_id})<-[r]-(node) + RETURN r, node + """ + return self._basic_read_query_to_dict(q) + incoming = property(_incoming) + + def _outgoing(self): + q = """ + MATCH (n:Node {handle_id: $handle_id})-[r]->(node) + RETURN r, node + """ + return self._basic_read_query_to_dict(q) + outgoing = property(_outgoing) + + def _relationships(self): + q = """ + MATCH (n:Node {handle_id: $handle_id})-[r]-(node) + RETURN r, node + """ + return self._basic_read_query_to_dict(q) + relationships = property(_relationships) + + def _basic_read_query_to_dict(self, query, **kwargs): + d = defaultdict(list) + with self.manager.session as s: + kwargs['handle_id'] = self.handle_id + result = s.run(query, kwargs) + for record in result: + relationship = record['r'] + node = record['node'] + key = relationship.type + if 'key' in record.keys(): + key = record['key'] + d[key].append({ + 'relationship_id': relationship.id, + 'relationship': relationship, + 'node': core.get_node_model(self.manager, node=node) + }) + d.default_factory = None + return d + + def _basic_write_query_to_dict(self, query, **kwargs): + d = defaultdict(list) + with self.manager.session as s: + kwargs['handle_id'] = self.handle_id + result = s.run(query, kwargs) + for record in result: + created = record['created'] + relationship = record['r'] + node = record['node'] + key = relationship.type + if 'key' in record.keys(): + key = record['key'] + d[key].append({ + 'created': created, + 'relationship_id': relationship.id, + 'relationship': relationship, + 'node': core.get_node_model(self.manager, node=node) + }) + d.default_factory = None + return d + + def load(self, node_bundle): + self.meta_type = node_bundle.get('meta_type') + self.labels = node_bundle.get('labels') + self.data = node_bundle.get('data') + return self + + def add_label(self, label): + q = """ + MATCH (n:Node {{handle_id: $handle_id}}) + SET n:{label} + RETURN n + """.format(label=label) + with self.manager.session as s: + node = s.run(q, {'handle_id': self.handle_id}).single()['n'] + return self.reload(node=node) + + def remove_label(self, label): + q = """ + MATCH (n:Node {{handle_id: $handle_id}}) + REMOVE n:{label} + RETURN n + """.format(label=label) + with self.manager.session as s: + node = s.run(q, {'handle_id': self.handle_id}).single()['n'] + return self.reload(node=node) + + def change_meta_type(self, meta_type): + if meta_type not in core.META_TYPES: + raise core.exceptions.MetaLabelNamingError(meta_type) + if meta_type == self.meta_type: + return self + model = self.remove_label(self.meta_type) + return model.add_label(meta_type) + + def switch_type(self, old_type, new_type): + if old_type == new_type: + return self + model = self.remove_label(old_type) + return model.add_label(new_type) + + def delete(self): + core.delete_node(self.manager, self.handle_id) + + def reload(self, node=None): + return core.get_node_model(self.manager, self.handle_id, node=node) + + +class CommonQueries(BaseNodeModel): + + def 
get_location_path(self): + return {'location_path': []} + + def get_placement_path(self): + return {'placement_path': []} + + def get_location(self): + return {} + + def get_child_form_data(self, node_type): + type_filter = '' + if node_type: + type_filter = 'and (child):{node_type}'.format(node_type=node_type) + q = """ + MATCH (parent:Node {{handle_id:$handle_id}}) + MATCH (parent)--(child) + WHERE (parent)-[:Has]->(child) or (parent)<-[:Located_in|Part_of]-(child) {type_filter} + RETURN child.handle_id as handle_id, labels(child) as labels, child.name as name, + child.description as description + """.format(type_filter=type_filter) + return core.query_to_list(self.manager, q, handle_id=self.handle_id) + + def get_relations(self): + q = """ + MATCH (n:Node {handle_id: $handle_id})<-[r:Owns|Uses|Provides|Responsible_for]-(node) + RETURN r, node + """ + return self._basic_read_query_to_dict(q) + + def get_dependencies(self): + q = """ + MATCH (n:Node {handle_id: $handle_id})-[r:Depends_on]->(node) + RETURN r, node + """ + return self._basic_read_query_to_dict(q) + + def get_dependents(self): + q = """ + MATCH (n:Node {handle_id: $handle_id})<-[r:Depends_on]-(node) + RETURN r, node + """ + return self._basic_read_query_to_dict(q) + + def get_dependent_as_types(self): + q = """ + MATCH (node:Node {handle_id: $handle_id}) + OPTIONAL MATCH (node)<-[:Depends_on]-(d) + WITH node, collect(DISTINCT d) as direct + OPTIONAL MATCH (node)<-[:Part_of|Depends_on*1..20]-(dep) + OPTIONAL MATCH (node)-[:Depends_on]->(p:Port)<-[:Part_of|Depends_on*1..20]-(port_deps) + WITH direct, collect(DISTINCT dep) + collect(DISTINCT port_deps) as deps + WITH direct, deps, [n in deps WHERE n:Service] as services + WITH direct, deps, services, [n in deps WHERE n:Optical_Path] as paths + WITH direct, deps, services, paths, [n in deps WHERE n:Optical_Multiplex_Section] as oms + WITH direct, deps, services, paths, oms, [n in deps WHERE n:Optical_Link] as links + RETURN direct, services, paths, oms, links + """ + return core.query_to_dict(self.manager, q, handle_id=self.handle_id) + + def get_dependencies_as_types(self): + q = """ + MATCH (node:Node {handle_id: $handle_id}) + OPTIONAL MATCH (node)-[:Depends_on]->(d) + WITH node, collect(DISTINCT d) as direct + MATCH (node)-[:Depends_on*1..20]->(dep) + WITH node, direct, collect(DISTINCT dep) as deps + WITH node, direct, deps, [n in deps WHERE n:Service] as services + WITH node, direct, deps, services, [n in deps WHERE n:Optical_Path] as paths + WITH node, direct, deps, services, paths, [n in deps WHERE n:Optical_Multiplex_Section] as oms + WITH node, direct, deps, services, paths, oms, [n in deps WHERE n:Optical_Link] as links + WITH node, direct, services, paths, oms, links + OPTIONAL MATCH (node)-[:Depends_on*1..20]->()-[:Connected_to*1..50]-(cable) + RETURN direct, services, paths, oms, links, [n in collect(DISTINCT cable) WHERE n:Cable] as cables + """ + return core.query_to_dict(self.manager, q, handle_id=self.handle_id) + + def get_ports(self): + q = """ + MATCH (node:Node {handle_id: $handle_id})-[r:Connected_to|Depends_on]-(port:Port) + WITH port, r + OPTIONAL MATCH p=(port)<-[:Has*1..]-(parent) + RETURN port, r as relationship, LAST(nodes(p)) as parent + ORDER BY parent.name + """ + return core.query_to_list(self.manager, q, handle_id=self.handle_id) + + +class LogicalModel(CommonQueries): + + def get_part_of(self): + q = """ + MATCH (n:Node {handle_id: $handle_id})-[r:Part_of]->(node) + RETURN r, node + """ + return self._basic_read_query_to_dict(q) + + def 
set_user(self, user_handle_id): + q = """ + MATCH (n:Node {handle_id: $handle_id}), (user:Node {handle_id: $user_handle_id}) + WITH n, user, NOT EXISTS((n)<-[:Uses]-(user)) as created + MERGE (n)<-[r:Uses]-(user) + RETURN created, r, user as node + """ + return self._basic_write_query_to_dict(q, user_handle_id=user_handle_id) + + def set_provider(self, provider_handle_id): + q = """ + MATCH (n:Node {handle_id: $handle_id}), (provider:Node {handle_id: $provider_handle_id}) + WITH n, provider, NOT EXISTS((n)<-[:Provides]-(provider)) as created + MERGE (n)<-[r:Provides]-(provider) + RETURN created, r, provider as node + """ + return self._basic_write_query_to_dict(q, provider_handle_id=provider_handle_id) + + def set_dependency(self, dependency_handle_id): + q = """ + MATCH (n:Node {handle_id: $handle_id}), (dependency:Node {handle_id: $dependency_handle_id}) + WITH n, dependency, NOT EXISTS((n)-[:Depends_on]->(dependency)) as created + MERGE (n)-[r:Depends_on]->(dependency) + RETURN created, r, dependency as node + """ + return self._basic_write_query_to_dict(q, dependency_handle_id=dependency_handle_id) + + # Logical versions of physical things can't have physical connections + def get_connections(self): + return [] + + # TODO: Create a method that complains if any relationships that breaks the model exists + + +class PhysicalModel(CommonQueries): + + def get_location(self): + q = """ + MATCH (n:Node {handle_id: $handle_id})-[r:Located_in]->(node) + RETURN r, node + """ + return self._basic_read_query_to_dict(q) + + def get_location_path(self): + # TODO: check if size(nodes(p))/size(path) in neo4j>=4.4 is equivalent to length(nodes(p))/length(path) in neo4j==3.5 + q = """ + MATCH (n:Node {handle_id: $handle_id})-[:Located_in]->(r) + MATCH p=()-[:Has*0..20]->(r) + WITH COLLECT(nodes(p)) as paths, MAX(size(nodes(p))) AS maxLength + WITH [path IN paths WHERE size(path)=maxLength] AS longestPaths + UNWIND(longestPaths) as location_path + RETURN location_path + """ + return core.query_to_dict(self.manager, q, handle_id=self.handle_id) + + def get_placement_path(self): + # TODO: check if size(nodes(p))/size(path) in neo4j>=4.4 is equivalent to length(nodes(p))/length(path) in neo4j==3.5 + q = """ + MATCH (n:Node {handle_id: $handle_id})<-[:Has]-(parent) + OPTIONAL MATCH p=()-[:Has*0..20]->(parent) + WITH COLLECT(nodes(p)) as paths, MAX(size(nodes(p))) AS maxLength + WITH [path IN paths WHERE size(path)=maxLength] AS longestPaths + UNWIND(longestPaths) as placement_path + RETURN placement_path + """ + return core.query_to_dict(self.manager, q, handle_id=self.handle_id) + + def set_owner(self, owner_handle_id): + q = """ + MATCH (n:Node {handle_id: $handle_id}), (owner:Node {handle_id: $owner_handle_id}) + WITH n, owner, NOT EXISTS((n)<-[:Owns]-(owner)) as created + MERGE (n)<-[r:Owns]-(owner) + RETURN created, r, owner as node + """ + return self._basic_write_query_to_dict(q, owner_handle_id=owner_handle_id) + + def set_provider(self, provider_handle_id): + q = """ + MATCH (n:Node {handle_id: $handle_id}), (provider:Node {handle_id: $provider_handle_id}) + WITH n, provider, NOT EXISTS((n)<-[:Provides]-(provider)) as created + MERGE (n)<-[r:Provides]-(provider) + RETURN created, r, provider as node + """ + return self._basic_write_query_to_dict(q, provider_handle_id=provider_handle_id) + + def set_location(self, location_handle_id): + q = """ + MATCH (n:Node {handle_id: $handle_id}), (location:Node {handle_id: $location_handle_id}) + WITH n, location, NOT EXISTS((n)-[:Located_in]->(location)) as 
created + MERGE (n)-[r:Located_in]->(location) + RETURN created, r, location as node + """ + return self._basic_write_query_to_dict(q, location_handle_id=location_handle_id) + + def get_has(self): + q = """ + MATCH (n:Node {handle_id: $handle_id})-[r:Has]->(part:Physical) + RETURN r, part as node + """ + return self._basic_read_query_to_dict(q) + + def set_has(self, has_handle_id): + q = """ + MATCH (n:Node {handle_id: $handle_id}), (part:Node {handle_id: $has_handle_id}) + WITH n, part, NOT EXISTS((n)-[:Has]->(part)) as created + MERGE (n)-[r:Has]->(part) + RETURN created, r, part as node + """ + return self._basic_write_query_to_dict(q, has_handle_id=has_handle_id) + + def get_part_of(self): + q = """ + MATCH (n:Node {handle_id: $handle_id})<-[r:Part_of]-(part:Logical) + RETURN r, part as node + """ + return self._basic_read_query_to_dict(q) + + def set_part_of(self, part_handle_id): + q = """ + MATCH (n:Node {handle_id: $handle_id}), (part:Node:Logical {handle_id: $part_handle_id}) + WITH n, part, NOT EXISTS((n)<-[:Part_of]-(part)) as created + MERGE (n)<-[r:Part_of]-(part) + RETURN created, r, part as node + """ + return self._basic_write_query_to_dict(q, part_handle_id=part_handle_id) + + def get_parent(self): + q = """ + MATCH (n:Node {handle_id: $handle_id})<-[r:Has]-(parent) + RETURN r, parent as node + """ + return self._basic_read_query_to_dict(q) + + # TODO: Create a method that complains if any relationships that breaks the model exists + + +class LocationModel(CommonQueries): + + def get_location_path(self): + # TODO: check if size(nodes(p))/size(path) in neo4j>=4.4 is equivalent to length(nodes(p))/length(path) in neo4j==3.5 + q = """ + MATCH (n:Node {handle_id: $handle_id})<-[:Has]-(r) + MATCH p=()-[:Has*0..20]->(r) + WITH COLLECT(nodes(p)) as paths, MAX(size(nodes(p))) AS maxLength + WITH [path IN paths WHERE size(path)=maxLength] AS longestPaths + UNWIND(longestPaths) as location_path + RETURN location_path + """ + return core.query_to_dict(self.manager, q, handle_id=self.handle_id) + + def get_parent(self): + q = """ + MATCH (n:Node {handle_id: $handle_id})<-[r:Has]-(parent) + RETURN r, parent as node + """ + return self._basic_read_query_to_dict(q) + + def get_located_in(self): + q = """ + MATCH (n:Node {handle_id: $handle_id})<-[r:Located_in]-(node) + RETURN r, node + """ + return self._basic_read_query_to_dict(q) + + def get_has(self): + q = """ + MATCH (n:Node {handle_id: $handle_id})-[r:Has]->(node:Location) + RETURN r, node + """ + return self._basic_read_query_to_dict(q) + + def set_has(self, has_handle_id): + q = """ + MATCH (n:Node {handle_id: $handle_id}), (part:Node {handle_id: $has_handle_id}) + WITH n, part, NOT EXISTS((n)-[:Has]->(part)) as created + MERGE (n)-[r:Has]->(part) + RETURN created, r, part as node + """ + return self._basic_write_query_to_dict(q, has_handle_id=has_handle_id) + + def set_responsible_for(self, owner_handle_id): + q = """ + MATCH (n:Node {handle_id: $handle_id}), (owner:Node {handle_id: $owner_handle_id}) + WITH n, owner, NOT EXISTS((n)<-[:Responsible_for]-(owner)) as created + MERGE (n)<-[r:Responsible_for]-(owner) + RETURN created, r, owner as node + """ + return self._basic_write_query_to_dict(q, owner_handle_id=owner_handle_id) + + +class RelationModel(CommonQueries): + + def with_same_name(self): + q = """ + MATCH (n:Node {handle_id: $handle_id}), (other:Node:Relation {name: $name}) + WHERE other.handle_id <> n.handle_id + RETURN COLLECT(other.handle_id) as ids + """ + return core.query_to_dict(self.manager, q, 
handle_id=self.handle_id, name=self.data.get('name')) + + def get_uses(self): + q = """ + MATCH (n:Node {handle_id: $handle_id})-[r:Uses]->(usable) + RETURN r, usable as node + """ + return self._basic_read_query_to_dict(q) + + def get_provides(self): + q = """ + MATCH (n:Node {handle_id: $handle_id})-[r:Provides]->(usable) + RETURN r, usable as node + """ + return self._basic_read_query_to_dict(q) + + def get_owns(self): + q = """ + MATCH (n:Node {handle_id: $handle_id})-[r:Owns]->(usable) + RETURN r, usable as node + """ + return self._basic_read_query_to_dict(q) + + def get_responsible_for(self): + q = """ + MATCH (n:Node {handle_id: $handle_id})-[r:Responsible_for]->(usable) + RETURN r, usable as node + """ + return self._basic_read_query_to_dict(q) + + +class EquipmentModel(PhysicalModel): + + def get_ports(self): + q = """ + MATCH (n:Node {handle_id: $handle_id})-[r:Has]->(port:Port) + RETURN r, port as node + """ + return self._basic_read_query_to_dict(q) + + def get_port(self, port_name): + q = """ + MATCH (n:Node {handle_id: $handle_id})-[r:Has]->(port:Port) + WHERE port.name = $port_name + RETURN r, port as node + """ + return self._basic_read_query_to_dict(q, port_name=port_name) + + def get_dependent_as_types(self): + # The + [null] is to handle both dep lists being emtpy since UNWIND gives 0 rows on unwind + q = """ + MATCH (node:Node {handle_id: $handle_id}) + OPTIONAL MATCH (node)<-[:Depends_on]-(d) + WITH node, collect(DISTINCT d) as direct + OPTIONAL MATCH (node)-[:Has*1..20]->()<-[:Part_of|Depends_on*1..20]-(dep) + OPTIONAL MATCH (node)-[:Has*1..20]->()<-[:Connected_to]-()-[:Connected_to]->()<-[:Depends_on*1..20]-(cable_dep) + WITH direct, collect(DISTINCT dep) + collect(DISTINCT cable_dep) + direct as coll + UNWIND coll AS x + WITH direct, collect(DISTINCT x) as deps + WITH direct, deps, [n in deps WHERE n:Service] as services + WITH direct, deps, services, [n in deps WHERE n:Optical_Path] as paths + WITH direct, deps, services, paths, [n in deps WHERE n:Optical_Multiplex_Section] as oms + WITH direct, deps, services, paths, oms, [n in deps WHERE n:Optical_Link] as links + RETURN direct, services, paths, oms, links + """ + return core.query_to_dict(self.manager, q, handle_id=self.handle_id) + + def get_connections(self): + q = """ + MATCH (n:Node {handle_id: $handle_id})-[:Has*1..10]->(porta:Port) + OPTIONAL MATCH (porta)<-[r0:Connected_to]-(cable) + OPTIONAL MATCH (cable)-[r1:Connected_to]->(portb:Port) + WHERE ID(r1) <> ID(r0) + OPTIONAL MATCH (portb)<-[:Has*1..10]-(end) + WITH porta, r0, cable, portb, r1, last(collect(end)) as end + OPTIONAL MATCH (end)-[:Located_in]->(location) + OPTIONAL MATCH (location)<-[:Has]-(site) + RETURN porta, r0, cable, r1, portb, end, location, site + """ + return core.query_to_list(self.manager, q, handle_id=self.handle_id) + + +class SubEquipmentModel(PhysicalModel): + + def get_location_path(self): + # TODO: check if size(nodes(p))/size(path) in neo4j>=4.4 is equivalent to length(nodes(p))/length(path) in neo4j==3.5 + q = """ + MATCH (n:Node {handle_id: $handle_id})<-[:Has]-(parent) + OPTIONAL MATCH p=()-[:Has*0..20]->(r)<-[:Located_in]-()-[:Has*0..20]->(parent) + WITH COLLECT(nodes(p)) as paths, MAX(size(nodes(p))) AS maxLength + WITH [path IN paths WHERE size(path)=maxLength] AS longestPaths + UNWIND(longestPaths) as location_path + RETURN location_path + """ + return core.query_to_dict(self.manager, q, handle_id=self.handle_id) + + def get_connections(self): + q = """ + MATCH (porta:Node {handle_id: 
$handle_id})<-[r0:Connected_to]-(cable) + OPTIONAL MATCH (porta)<-[r0:Connected_to]-(cable)-[r1:Connected_to]->(portb) + OPTIONAL MATCH (portb)<-[:Has*1..10]-(end) + WITH porta, r0, cable, portb, r1, last(collect(end)) as end + OPTIONAL MATCH (end)-[:Located_in]->(location) + OPTIONAL MATCH (location)<-[:Has]-(site) + RETURN porta, r0, cable, r1, portb, end, location, site + """ + return core.query_to_list(self.manager, q, handle_id=self.handle_id) + + +class HostModel(CommonQueries): + + def get_dependent_as_types(self): + q = """ + MATCH (node:Node {handle_id: $handle_id}) + OPTIONAL MATCH (node)<-[:Depends_on]-(d) + WITH node, [n in collect(DISTINCT d)] as direct + MATCH (node)<-[:Depends_on*1..20]-(dep) + WITH direct, collect(DISTINCT dep) as deps + WITH direct, deps, [n in deps WHERE n:Service] as services + WITH direct, deps, services, [n in deps WHERE n:Optical_Path] as paths + WITH direct, deps, services, paths, [n in deps WHERE n:Optical_Multiplex_Section] as oms + WITH direct, deps, services, paths, oms, [n in deps WHERE n:Optical_Link] as links + RETURN direct, services, paths, oms, links + """ + + return core.query_to_dict(self.manager, q, handle_id=self.handle_id) + + +class PhysicalHostModel(HostModel, EquipmentModel): + pass + + +class LogicalHostModel(HostModel, LogicalModel): + pass + + +class PortModel(SubEquipmentModel): + + def get_units(self): + q = """ + MATCH (n:Node {handle_id: $handle_id})<-[r:Part_of]-(unit:Unit) + RETURN r, unit as node + """ + return self._basic_read_query_to_dict(q) + + def get_unit(self, unit_name): + q = """ + MATCH (n:Node {handle_id: $handle_id})<-[r:Part_of]-(unit:Unit) + WHERE unit.name = $unit_name + RETURN r, unit as node + """ + return self._basic_read_query_to_dict(q, unit_name=unit_name) + + def get_connected_to(self): + q = """ + MATCH (n:Node {handle_id: $handle_id})<-[r:Connected_to]-(cable:Cable) + RETURN r, cable as node + """ + return self._basic_read_query_to_dict(q) + + def get_connection_path(self): + q = """ + MATCH (n:Port {handle_id: $handle_id})-[:Connected_to*0..20]-(port:Port) + OPTIONAL MATCH path=(port)-[:Connected_to*]-() + WITH nodes(path) AS parts, length(path) AS len + ORDER BY len DESC + LIMIT 1 + UNWIND parts AS part + OPTIONAL MATCH (part)<-[:Has*1..20]-(parent) + WHERE NOT (parent)<-[:Has]-() + RETURN part, parent + """ + return core.query_to_list(self.manager, q, handle_id=self.handle_id) + + +class OpticalNodeModel(EquipmentModel): + pass + + +class RouterModel(EquipmentModel): + + def get_child_form_data(self, node_type=None): + if node_type: + type_filter = ':{node_type}'.format(node_type=node_type) + else: + type_filter = ':Port' + q = """ + MATCH (parent:Node {{handle_id: $handle_id}}) + MATCH (parent)-[:Has*]->(child{type_filter}) + RETURN child.handle_id as handle_id, labels(child) as labels, child.name as name, + child.description as description + ORDER BY child.name + """.format(type_filter=type_filter) + return core.query_to_list(self.manager, q, handle_id=self.handle_id) + + +class PeeringPartnerModel(RelationModel): + + def get_peering_groups(self): + q = """ + MATCH (host:Node {handle_id: $handle_id})-[r:Uses]->(group:Peering_Group) + RETURN r, group as node + """ + return self._basic_read_query_to_dict(q) + + def get_peering_group(self, group_handle_id, ip_address): + q = """ + MATCH (n:Node {handle_id: $handle_id})-[r:Uses]->(group:Node {handle_id: $group_handle_id}) + WHERE r.ip_address=$ip_address + RETURN r, group as node + """ + return self._basic_read_query_to_dict(q, 
group_handle_id=group_handle_id, ip_address=ip_address) + + def set_peering_group(self, group_handle_id, ip_address): + q = """ + MATCH (n:Node {handle_id: $handle_id}), (group:Node {handle_id: $group_handle_id}) + CREATE (n)-[r:Uses {ip_address:$ip_address}]->(group) + RETURN true as created, r, group as node + """ + return self._basic_write_query_to_dict(q, group_handle_id=group_handle_id, ip_address=ip_address) + + +class PeeringGroupModel(LogicalModel): + + def get_group_dependency(self, dependency_handle_id, ip_address): + q = """ + MATCH (n:Node {handle_id: $handle_id})-[r:Depends_on]->(dependency:Node {handle_id: $dependency_handle_id}) + WHERE r.ip_address=$ip_address + RETURN r, dependency as node + """ + return self._basic_read_query_to_dict(q, dependency_handle_id=dependency_handle_id, ip_address=ip_address) + + def set_group_dependency(self, dependency_handle_id, ip_address): + q = """ + MATCH (n:Node {handle_id: $handle_id}), (dependency:Node {handle_id: $dependency_handle_id}) + CREATE (n)-[r:Depends_on {ip_address:$ip_address}]->(dependency) + RETURN true as created, r, dependency as node + """ + return self._basic_write_query_to_dict(q, dependency_handle_id=dependency_handle_id, ip_address=ip_address) + + +class CableModel(PhysicalModel): + + def get_connected_equipment(self): + q = """ + MATCH (n:Node {handle_id: $handle_id})-[rel:Connected_to]->(port) + OPTIONAL MATCH (port)<-[:Has*1..10]-(end) + WITH rel, port, last(collect(end)) as end + OPTIONAL MATCH (end)-[:Located_in]->(location) + OPTIONAL MATCH (location)<-[:Has]-(site) + RETURN id(rel) as rel_id, rel, port, end, location, site + ORDER BY end.name, port.name + """ + return core.query_to_list(self.manager, q, handle_id=self.handle_id) + + def get_dependent_as_types(self): + q = """ + MATCH (n:Node {handle_id: $handle_id})-[:Connected_to*1..20]-(equip) + WITH DISTINCT equip + MATCH (equip)<-[:Part_of|Depends_on*1..10]-(dep) + WITH collect(DISTINCT dep) as deps + WITH deps, [n in deps WHERE n:Service] as services + WITH deps, services, [n in deps WHERE n:Optical_Path] as paths + WITH deps, services, paths, [n in deps WHERE n:Optical_Multiplex_Section] as oms + WITH deps, services, paths, oms, [n in deps WHERE n:Optical_Link] as links + RETURN services, paths, oms, links + """ + return core.query_to_dict(self.manager, q, handle_id=self.handle_id) + + def get_services(self): + q = """ + MATCH (n:Node {handle_id: $handle_id}) + MATCH (n)-[:Connected_to*1..20]-(equip) + WITH equip + MATCH (equip)<-[:Depends_on*1..10]-(service) + WHERE service:Service + WITH distinct service + OPTIONAL MATCH (service)<-[:Uses]-(user) + RETURN service, collect(user) as users + """ + return core.query_to_list(self.manager, q, handle_id=self.handle_id) + + def get_connection_path(self): + q = """ + MATCH (n:Cable {handle_id: $handle_id})-[:Connected_to*1..10]-(port:Port) + OPTIONAL MATCH path=(port)-[:Connected_to*]-() + WITH nodes(path) AS parts, length(path) AS len + ORDER BY len DESC + LIMIT 1 + UNWIND parts AS part + OPTIONAL MATCH (part)<-[:Has*1..10]-(parent) + WHERE NOT (parent)<-[:Has]-() + RETURN part, parent + """ + return core.query_to_list(self.manager, q, handle_id=self.handle_id) + + def set_connected_to(self, connected_to_handle_id): + q = """ + MATCH (n:Node {handle_id: $handle_id}), (part:Node {handle_id: $connected_to_handle_id}) + WITH n, part, NOT EXISTS((n)-[:Connected_to]->(part)) as created + MERGE (n)-[r:Connected_to]->(part) + RETURN created, r, part as node + """ + return self._basic_write_query_to_dict(q, 
connected_to_handle_id=connected_to_handle_id) + + +class UnitModel(LogicalModel): + + def get_placement_path(self): + # TODO: check if size(nodes(p))/size(path) in neo4j>=4.4 is equivalent to length(nodes(p))/length(path) in neo4j==3.5 + q = """ + MATCH (n:Node {handle_id: $handle_id})-[:Part_of]->(parent) + OPTIONAL MATCH p=()-[:Has*0..20]->(parent) + WITH COLLECT(nodes(p)) as paths, MAX(size(nodes(p))) AS maxLength + WITH [path IN paths WHERE size(path)=maxLength] AS longestPaths + UNWIND(longestPaths) as placement_path + RETURN placement_path + """ + return core.query_to_dict(self.manager, q, handle_id=self.handle_id) + + def get_location_path(self): + # TODO: check if size(nodes(p))/size(path) in neo4j>=4.4 is equivalent to length(nodes(p))/length(path) in neo4j==3.5 + q = """ + MATCH (n:Node {handle_id: $handle_id})-[:Part_of]->(parent) + OPTIONAL MATCH p=()-[:Has*0..20]->(r)<-[:Located_in]-()-[:Has*0..20]->(parent) + WITH COLLECT(nodes(p)) as paths, MAX(size(nodes(p))) AS maxLength + WITH [path IN paths WHERE size(path)=maxLength] AS longestPaths + UNWIND(longestPaths) as location_path + RETURN location_path + """ + return core.query_to_dict(self.manager, q, handle_id=self.handle_id) + + +class ServiceModel(LogicalModel): + + def get_customers(self): + q = """ + MATCH (n:Node {handle_id: $handle_id})<-[r:Owns|Uses]-(customer:Customer) + RETURN "customers" as key, r, customer as node + """ + return self._basic_read_query_to_dict(q) + + +class OpticalPathModel(LogicalModel): + pass + + +class OpticalMultiplexSection(LogicalModel): + pass + + +class OpticalLinkModel(LogicalModel): + pass + + +class ExternalEquipmentModel(EquipmentModel): + pass + + +class ODFModel(EquipmentModel): + pass + + +class OpticalFilterModel(EquipmentModel): + pass + + +class SwitchModel(EquipmentModel, HostModel): + pass + + +class FirewallModel(EquipmentModel, HostModel): + pass + + +class PDUModel(EquipmentModel, HostModel): + pass + + +class PICModel(SubEquipmentModel): + pass + + +class FPCModel(SubEquipmentModel): + pass + + +class CustomerModel(RelationModel): + pass + + +class ProviderModel(RelationModel): + pass + + +class PatchPanelModel(EquipmentModel): + pass + + +class OutletModel(EquipmentModel): + pass diff --git a/norduniclient/testing.py b/norduniclient/testing.py new file mode 100644 index 0000000..bd0476e --- /dev/null +++ b/norduniclient/testing.py @@ -0,0 +1,95 @@ +# -*- coding: utf-8 -*- + +from __future__ import absolute_import + +import os +import unittest +import time +import atexit +from socket import error as SocketError + +from norduniclient.core import init_db +import collections +collections.Callable = collections.abc.Callable + +__author__ = 'lundberg' + + +class Neo4jTemporaryInstance(object): + """ + Singleton to manage a temporary Neo4j instance + + Use this for testing purpose only. The instance is automatically destroyed + at the end of the program. 
+ + """ + _instance = None + _http_port = None + _bolt_port = None + + @classmethod + def get_instance(cls): + if cls._instance is None: + cls._instance = cls() + atexit.register(cls._instance.shutdown) + return cls._instance + + def __init__(self): + self._host = os.environ.get("NEO4J_HOSTNAME", "localhost") + self._http_port = os.environ.get("NEO4J_HTTP_PORT", 7474) + self._bolt_port = os.environ.get("NEO4J_BOLT_PORT", 7687) + self._neo4j_password = os.environ.get("NEO4J_PASSWORD", "neo4j") + self._neo4j_user = os.environ.get("NEO4J_USER", "neo4j") + + for i in range(300): + time.sleep(0.5) + try: + self._db = init_db('bolt://{!s}:{!s}'.format(self.host, self.bolt_port), username=self._neo4j_user, + password=self._neo4j_password, encrypted=False) + except SocketError: + continue + else: + break + else: + self.shutdown() + assert False, 'Cannot connect to the neo4j test instance' + + @property + def db(self): + return self._db + + @property + def host(self): + return self._host + + @property + def http_port(self): + return self._http_port + + @property + def bolt_port(self): + return self._bolt_port + + def purge_db(self): + q = """ + MATCH (n:Node) + OPTIONAL MATCH (n)-[r]-() + DELETE n,r + """ + with self.db.session as s: + s.run(q) + + def shutdown(self): + pass + + +class Neo4jTestCase(unittest.TestCase): + """ + Base test case that sets up a temporary Neo4j instance + """ + + neo4j_instance = Neo4jTemporaryInstance.get_instance() + neo4jdb = neo4j_instance.db + + def tearDown(self): + self.neo4j_instance.purge_db() diff --git a/noxfile.py b/noxfile.py new file mode 100644 index 0000000..409e8fe --- /dev/null +++ b/noxfile.py @@ -0,0 +1,40 @@ +import os +import nox + + +@nox.session(python=["3.9"]) +def tests(session): + # Install dev dependencies + session.install("neo4j==4.4.12") + # Set environment variables using os.environ + os.environ["NEO4J_HTTP_PORT"] = "7476" + os.environ["NEO4J_BOLT_PORT"] = "7689" + os.environ["NEO4J_HOSTNAME"] = "localhost" + os.environ["NEO4J_USER"] = "neo4j" + os.environ["NEO4J_PASSWORD"] = "" + session.run("python", "-m", "unittest", "discover", "-s", "tests") + + +@nox.session(python=["3.9"]) +def tests_dotenv(session): + session.install("neo4j==4.4.12") + session.run("dotenvx", "run", "--", "python", "-m", "unittest", "discover", "-s", "tests") + + +@nox.session(python=["3.9"]) +def lint(session): + session.install("flake8") + session.install("flake8-pyproject") + session.run("flake8", "norduniclient") + + +@nox.session(python=["3.9"]) +def typecheck(session): + session.install("mypy") + session.run("mypy", "norduniclient") + + +@nox.session(python=["3.9"]) +def format(session): + session.install("black") + session.run("black", "--check", "norduniclient") diff --git a/poetry.lock b/poetry.lock new file mode 100644 index 0000000..a1df271 --- /dev/null +++ b/poetry.lock @@ -0,0 +1,546 @@ +# This file is automatically @generated by Poetry 1.7.1 and should not be changed by hand. 
+ +[[package]] +name = "argcomplete" +version = "3.5.1" +description = "Bash tab completion for argparse" +optional = false +python-versions = ">=3.8" +files = [ + {file = "argcomplete-3.5.1-py3-none-any.whl", hash = "sha256:1a1d148bdaa3e3b93454900163403df41448a248af01b6e849edc5ac08e6c363"}, + {file = "argcomplete-3.5.1.tar.gz", hash = "sha256:eb1ee355aa2557bd3d0145de7b06b2a45b0ce461e1e7813f5d066039ab4177b4"}, +] + +[package.extras] +test = ["coverage", "mypy", "pexpect", "ruff", "wheel"] + +[[package]] +name = "black" +version = "24.10.0" +description = "The uncompromising code formatter." +optional = false +python-versions = ">=3.9" +files = [ + {file = "black-24.10.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e6668650ea4b685440857138e5fe40cde4d652633b1bdffc62933d0db4ed9812"}, + {file = "black-24.10.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1c536fcf674217e87b8cc3657b81809d3c085d7bf3ef262ead700da345bfa6ea"}, + {file = "black-24.10.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:649fff99a20bd06c6f727d2a27f401331dc0cc861fb69cde910fe95b01b5928f"}, + {file = "black-24.10.0-cp310-cp310-win_amd64.whl", hash = "sha256:fe4d6476887de70546212c99ac9bd803d90b42fc4767f058a0baa895013fbb3e"}, + {file = "black-24.10.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:5a2221696a8224e335c28816a9d331a6c2ae15a2ee34ec857dcf3e45dbfa99ad"}, + {file = "black-24.10.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f9da3333530dbcecc1be13e69c250ed8dfa67f43c4005fb537bb426e19200d50"}, + {file = "black-24.10.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4007b1393d902b48b36958a216c20c4482f601569d19ed1df294a496eb366392"}, + {file = "black-24.10.0-cp311-cp311-win_amd64.whl", hash = "sha256:394d4ddc64782e51153eadcaaca95144ac4c35e27ef9b0a42e121ae7e57a9175"}, + {file = "black-24.10.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:b5e39e0fae001df40f95bd8cc36b9165c5e2ea88900167bddf258bacef9bbdc3"}, + {file = "black-24.10.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d37d422772111794b26757c5b55a3eade028aa3fde43121ab7b673d050949d65"}, + {file = "black-24.10.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:14b3502784f09ce2443830e3133dacf2c0110d45191ed470ecb04d0f5f6fcb0f"}, + {file = "black-24.10.0-cp312-cp312-win_amd64.whl", hash = "sha256:30d2c30dc5139211dda799758559d1b049f7f14c580c409d6ad925b74a4208a8"}, + {file = "black-24.10.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:1cbacacb19e922a1d75ef2b6ccaefcd6e93a2c05ede32f06a21386a04cedb981"}, + {file = "black-24.10.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:1f93102e0c5bb3907451063e08b9876dbeac810e7da5a8bfb7aeb5a9ef89066b"}, + {file = "black-24.10.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ddacb691cdcdf77b96f549cf9591701d8db36b2f19519373d60d31746068dbf2"}, + {file = "black-24.10.0-cp313-cp313-win_amd64.whl", hash = "sha256:680359d932801c76d2e9c9068d05c6b107f2584b2a5b88831c83962eb9984c1b"}, + {file = "black-24.10.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:17374989640fbca88b6a448129cd1745c5eb8d9547b464f281b251dd00155ccd"}, + {file = "black-24.10.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:63f626344343083322233f175aaf372d326de8436f5928c042639a4afbbf1d3f"}, + {file = "black-24.10.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = 
"sha256:ccfa1d0cb6200857f1923b602f978386a3a2758a65b52e0950299ea014be6800"}, + {file = "black-24.10.0-cp39-cp39-win_amd64.whl", hash = "sha256:2cd9c95431d94adc56600710f8813ee27eea544dd118d45896bb734e9d7a0dc7"}, + {file = "black-24.10.0-py3-none-any.whl", hash = "sha256:3bb2b7a1f7b685f85b11fed1ef10f8a9148bceb49853e47a294a3dd963c1dd7d"}, + {file = "black-24.10.0.tar.gz", hash = "sha256:846ea64c97afe3bc677b761787993be4991810ecc7a4a937816dd6bddedc4875"}, +] + +[package.dependencies] +click = ">=8.0.0" +mypy-extensions = ">=0.4.3" +packaging = ">=22.0" +pathspec = ">=0.9.0" +platformdirs = ">=2" +tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} +typing-extensions = {version = ">=4.0.1", markers = "python_version < \"3.11\""} + +[package.extras] +colorama = ["colorama (>=0.4.3)"] +d = ["aiohttp (>=3.10)"] +jupyter = ["ipython (>=7.8.0)", "tokenize-rt (>=3.2.0)"] +uvloop = ["uvloop (>=0.15.2)"] + +[[package]] +name = "cfgv" +version = "3.4.0" +description = "Validate configuration and produce human readable error messages." +optional = false +python-versions = ">=3.8" +files = [ + {file = "cfgv-3.4.0-py2.py3-none-any.whl", hash = "sha256:b7265b1f29fd3316bfcd2b330d63d024f2bfd8bcb8b0272f8e19a504856c48f9"}, + {file = "cfgv-3.4.0.tar.gz", hash = "sha256:e52591d4c5f5dead8e0f673fb16db7949d2cfb3f7da4582893288f0ded8fe560"}, +] + +[[package]] +name = "click" +version = "8.1.7" +description = "Composable command line interface toolkit" +optional = false +python-versions = ">=3.7" +files = [ + {file = "click-8.1.7-py3-none-any.whl", hash = "sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28"}, + {file = "click-8.1.7.tar.gz", hash = "sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "platform_system == \"Windows\""} + +[[package]] +name = "colorama" +version = "0.4.6" +description = "Cross-platform colored terminal text." +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +files = [ + {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, + {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, +] + +[[package]] +name = "colorlog" +version = "6.8.2" +description = "Add colours to the output of Python's logging module." 
+optional = false +python-versions = ">=3.6" +files = [ + {file = "colorlog-6.8.2-py3-none-any.whl", hash = "sha256:4dcbb62368e2800cb3c5abd348da7e53f6c362dda502ec27c560b2e58a66bd33"}, + {file = "colorlog-6.8.2.tar.gz", hash = "sha256:3e3e079a41feb5a1b64f978b5ea4f46040a94f11f0e8bbb8261e3dbbeca64d44"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "sys_platform == \"win32\""} + +[package.extras] +development = ["black", "flake8", "mypy", "pytest", "types-colorama"] + +[[package]] +name = "distlib" +version = "0.3.9" +description = "Distribution utilities" +optional = false +python-versions = "*" +files = [ + {file = "distlib-0.3.9-py2.py3-none-any.whl", hash = "sha256:47f8c22fd27c27e25a65601af709b38e4f0a45ea4fc2e710f65755fa8caaaf87"}, + {file = "distlib-0.3.9.tar.gz", hash = "sha256:a60f20dea646b8a33f3e7772f74dc0b2d0772d2837ee1342a00645c81edf9403"}, +] + +[[package]] +name = "exceptiongroup" +version = "1.2.2" +description = "Backport of PEP 654 (exception groups)" +optional = false +python-versions = ">=3.7" +files = [ + {file = "exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b"}, + {file = "exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc"}, +] + +[package.extras] +test = ["pytest (>=6)"] + +[[package]] +name = "filelock" +version = "3.16.1" +description = "A platform independent file lock." +optional = false +python-versions = ">=3.8" +files = [ + {file = "filelock-3.16.1-py3-none-any.whl", hash = "sha256:2082e5703d51fbf98ea75855d9d5527e33d8ff23099bec374a134febee6946b0"}, + {file = "filelock-3.16.1.tar.gz", hash = "sha256:c249fbfcd5db47e5e2d6d62198e565475ee65e4831e2561c8e313fa7eb961435"}, +] + +[package.extras] +docs = ["furo (>=2024.8.6)", "sphinx (>=8.0.2)", "sphinx-autodoc-typehints (>=2.4.1)"] +testing = ["covdefaults (>=2.3)", "coverage (>=7.6.1)", "diff-cover (>=9.2)", "pytest (>=8.3.3)", "pytest-asyncio (>=0.24)", "pytest-cov (>=5)", "pytest-mock (>=3.14)", "pytest-timeout (>=2.3.1)", "virtualenv (>=20.26.4)"] +typing = ["typing-extensions (>=4.12.2)"] + +[[package]] +name = "flake8" +version = "7.1.1" +description = "the modular source code checker: pep8 pyflakes and co" +optional = false +python-versions = ">=3.8.1" +files = [ + {file = "flake8-7.1.1-py2.py3-none-any.whl", hash = "sha256:597477df7860daa5aa0fdd84bf5208a043ab96b8e96ab708770ae0364dd03213"}, + {file = "flake8-7.1.1.tar.gz", hash = "sha256:049d058491e228e03e67b390f311bbf88fce2dbaa8fa673e7aea87b7198b8d38"}, +] + +[package.dependencies] +mccabe = ">=0.7.0,<0.8.0" +pycodestyle = ">=2.12.0,<2.13.0" +pyflakes = ">=3.2.0,<3.3.0" + +[[package]] +name = "identify" +version = "2.6.1" +description = "File identification library for Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "identify-2.6.1-py2.py3-none-any.whl", hash = "sha256:53863bcac7caf8d2ed85bd20312ea5dcfc22226800f6d6881f232d861db5a8f0"}, + {file = "identify-2.6.1.tar.gz", hash = "sha256:91478c5fb7c3aac5ff7bf9b4344f803843dc586832d5f110d672b19aa1984c98"}, +] + +[package.extras] +license = ["ukkonen"] + +[[package]] +name = "iniconfig" +version = "2.0.0" +description = "brain-dead simple config-ini parsing" +optional = false +python-versions = ">=3.7" +files = [ + {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"}, + {file = "iniconfig-2.0.0.tar.gz", hash = 
"sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, +] + +[[package]] +name = "mccabe" +version = "0.7.0" +description = "McCabe checker, plugin for flake8" +optional = false +python-versions = ">=3.6" +files = [ + {file = "mccabe-0.7.0-py2.py3-none-any.whl", hash = "sha256:6c2d30ab6be0e4a46919781807b4f0d834ebdd6c6e3dca0bda5a15f863427b6e"}, + {file = "mccabe-0.7.0.tar.gz", hash = "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325"}, +] + +[[package]] +name = "mypy" +version = "1.11.2" +description = "Optional static typing for Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "mypy-1.11.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d42a6dd818ffce7be66cce644f1dff482f1d97c53ca70908dff0b9ddc120b77a"}, + {file = "mypy-1.11.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:801780c56d1cdb896eacd5619a83e427ce436d86a3bdf9112527f24a66618fef"}, + {file = "mypy-1.11.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:41ea707d036a5307ac674ea172875f40c9d55c5394f888b168033177fce47383"}, + {file = "mypy-1.11.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6e658bd2d20565ea86da7d91331b0eed6d2eee22dc031579e6297f3e12c758c8"}, + {file = "mypy-1.11.2-cp310-cp310-win_amd64.whl", hash = "sha256:478db5f5036817fe45adb7332d927daa62417159d49783041338921dcf646fc7"}, + {file = "mypy-1.11.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:75746e06d5fa1e91bfd5432448d00d34593b52e7e91a187d981d08d1f33d4385"}, + {file = "mypy-1.11.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a976775ab2256aadc6add633d44f100a2517d2388906ec4f13231fafbb0eccca"}, + {file = "mypy-1.11.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:cd953f221ac1379050a8a646585a29574488974f79d8082cedef62744f0a0104"}, + {file = "mypy-1.11.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:57555a7715c0a34421013144a33d280e73c08df70f3a18a552938587ce9274f4"}, + {file = "mypy-1.11.2-cp311-cp311-win_amd64.whl", hash = "sha256:36383a4fcbad95f2657642a07ba22ff797de26277158f1cc7bd234821468b1b6"}, + {file = "mypy-1.11.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:e8960dbbbf36906c5c0b7f4fbf2f0c7ffb20f4898e6a879fcf56a41a08b0d318"}, + {file = "mypy-1.11.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:06d26c277962f3fb50e13044674aa10553981ae514288cb7d0a738f495550b36"}, + {file = "mypy-1.11.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6e7184632d89d677973a14d00ae4d03214c8bc301ceefcdaf5c474866814c987"}, + {file = "mypy-1.11.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:3a66169b92452f72117e2da3a576087025449018afc2d8e9bfe5ffab865709ca"}, + {file = "mypy-1.11.2-cp312-cp312-win_amd64.whl", hash = "sha256:969ea3ef09617aff826885a22ece0ddef69d95852cdad2f60c8bb06bf1f71f70"}, + {file = "mypy-1.11.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:37c7fa6121c1cdfcaac97ce3d3b5588e847aa79b580c1e922bb5d5d2902df19b"}, + {file = "mypy-1.11.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4a8a53bc3ffbd161b5b2a4fff2f0f1e23a33b0168f1c0778ec70e1a3d66deb86"}, + {file = "mypy-1.11.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2ff93107f01968ed834f4256bc1fc4475e2fecf6c661260066a985b52741ddce"}, + {file = "mypy-1.11.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:edb91dded4df17eae4537668b23f0ff6baf3707683734b6a818d5b9d0c0c31a1"}, + {file = 
"mypy-1.11.2-cp38-cp38-win_amd64.whl", hash = "sha256:ee23de8530d99b6db0573c4ef4bd8f39a2a6f9b60655bf7a1357e585a3486f2b"}, + {file = "mypy-1.11.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:801ca29f43d5acce85f8e999b1e431fb479cb02d0e11deb7d2abb56bdaf24fd6"}, + {file = "mypy-1.11.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:af8d155170fcf87a2afb55b35dc1a0ac21df4431e7d96717621962e4b9192e70"}, + {file = "mypy-1.11.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f7821776e5c4286b6a13138cc935e2e9b6fde05e081bdebf5cdb2bb97c9df81d"}, + {file = "mypy-1.11.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:539c570477a96a4e6fb718b8d5c3e0c0eba1f485df13f86d2970c91f0673148d"}, + {file = "mypy-1.11.2-cp39-cp39-win_amd64.whl", hash = "sha256:3f14cd3d386ac4d05c5a39a51b84387403dadbd936e17cb35882134d4f8f0d24"}, + {file = "mypy-1.11.2-py3-none-any.whl", hash = "sha256:b499bc07dbdcd3de92b0a8b29fdf592c111276f6a12fe29c30f6c417dd546d12"}, + {file = "mypy-1.11.2.tar.gz", hash = "sha256:7f9993ad3e0ffdc95c2a14b66dee63729f021968bff8ad911867579c65d13a79"}, +] + +[package.dependencies] +mypy-extensions = ">=1.0.0" +tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} +typing-extensions = ">=4.6.0" + +[package.extras] +dmypy = ["psutil (>=4.0)"] +install-types = ["pip"] +mypyc = ["setuptools (>=50)"] +reports = ["lxml"] + +[[package]] +name = "mypy-extensions" +version = "1.0.0" +description = "Type system extensions for programs checked with the mypy type checker." +optional = false +python-versions = ">=3.5" +files = [ + {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"}, + {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, +] + +[[package]] +name = "neo4j" +version = "4.4.12" +description = "Neo4j Bolt driver for Python" +optional = false +python-versions = ">=3.6" +files = [ + {file = "neo4j-4.4.12.tar.gz", hash = "sha256:b138271400e1ef2b89738e90ae0beb96b005f7dfedd68b17c4b85ee732d54125"}, +] + +[package.dependencies] +pytz = "*" + +[[package]] +name = "nodeenv" +version = "1.9.1" +description = "Node.js virtual environment builder" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +files = [ + {file = "nodeenv-1.9.1-py2.py3-none-any.whl", hash = "sha256:ba11c9782d29c27c70ffbdda2d7415098754709be8a7056d79a737cd901155c9"}, + {file = "nodeenv-1.9.1.tar.gz", hash = "sha256:6ec12890a2dab7946721edbfbcd91f3319c6ccc9aec47be7c7e6b7011ee6645f"}, +] + +[[package]] +name = "nox" +version = "2024.10.9" +description = "Flexible test automation." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "nox-2024.10.9-py3-none-any.whl", hash = "sha256:1d36f309a0a2a853e9bccb76bbef6bb118ba92fa92674d15604ca99adeb29eab"}, + {file = "nox-2024.10.9.tar.gz", hash = "sha256:7aa9dc8d1c27e9f45ab046ffd1c3b2c4f7c91755304769df231308849ebded95"}, +] + +[package.dependencies] +argcomplete = ">=1.9.4,<4" +colorlog = ">=2.6.1,<7" +packaging = ">=20.9" +tomli = {version = ">=1", markers = "python_version < \"3.11\""} +virtualenv = ">=20.14.1" + +[package.extras] +tox-to-nox = ["jinja2", "tox"] +uv = ["uv (>=0.1.6)"] + +[[package]] +name = "packaging" +version = "24.1" +description = "Core utilities for Python packages" +optional = false +python-versions = ">=3.8" +files = [ + {file = "packaging-24.1-py3-none-any.whl", hash = "sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124"}, + {file = "packaging-24.1.tar.gz", hash = "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002"}, +] + +[[package]] +name = "pathspec" +version = "0.12.1" +description = "Utility library for gitignore style pattern matching of file paths." +optional = false +python-versions = ">=3.8" +files = [ + {file = "pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08"}, + {file = "pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712"}, +] + +[[package]] +name = "platformdirs" +version = "4.3.6" +description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." +optional = false +python-versions = ">=3.8" +files = [ + {file = "platformdirs-4.3.6-py3-none-any.whl", hash = "sha256:73e575e1408ab8103900836b97580d5307456908a03e92031bab39e4554cc3fb"}, + {file = "platformdirs-4.3.6.tar.gz", hash = "sha256:357fb2acbc885b0419afd3ce3ed34564c13c9b95c89360cd9563f73aa5e2b907"}, +] + +[package.extras] +docs = ["furo (>=2024.8.6)", "proselint (>=0.14)", "sphinx (>=8.0.2)", "sphinx-autodoc-typehints (>=2.4)"] +test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=8.3.2)", "pytest-cov (>=5)", "pytest-mock (>=3.14)"] +type = ["mypy (>=1.11.2)"] + +[[package]] +name = "pluggy" +version = "1.5.0" +description = "plugin and hook calling mechanisms for python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"}, + {file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"}, +] + +[package.extras] +dev = ["pre-commit", "tox"] +testing = ["pytest", "pytest-benchmark"] + +[[package]] +name = "pre-commit" +version = "4.0.1" +description = "A framework for managing and maintaining multi-language pre-commit hooks." 
+optional = false +python-versions = ">=3.9" +files = [ + {file = "pre_commit-4.0.1-py2.py3-none-any.whl", hash = "sha256:efde913840816312445dc98787724647c65473daefe420785f885e8ed9a06878"}, + {file = "pre_commit-4.0.1.tar.gz", hash = "sha256:80905ac375958c0444c65e9cebebd948b3cdb518f335a091a670a89d652139d2"}, +] + +[package.dependencies] +cfgv = ">=2.0.0" +identify = ">=1.0.0" +nodeenv = ">=0.11.1" +pyyaml = ">=5.1" +virtualenv = ">=20.10.0" + +[[package]] +name = "pycodestyle" +version = "2.12.1" +description = "Python style guide checker" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pycodestyle-2.12.1-py2.py3-none-any.whl", hash = "sha256:46f0fb92069a7c28ab7bb558f05bfc0110dac69a0cd23c61ea0040283a9d78b3"}, + {file = "pycodestyle-2.12.1.tar.gz", hash = "sha256:6838eae08bbce4f6accd5d5572075c63626a15ee3e6f842df996bf62f6d73521"}, +] + +[[package]] +name = "pyflakes" +version = "3.2.0" +description = "passive checker of Python programs" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pyflakes-3.2.0-py2.py3-none-any.whl", hash = "sha256:84b5be138a2dfbb40689ca07e2152deb896a65c3a3e24c251c5c62489568074a"}, + {file = "pyflakes-3.2.0.tar.gz", hash = "sha256:1c61603ff154621fb2a9172037d84dca3500def8c8b630657d1701f026f8af3f"}, +] + +[[package]] +name = "pytest" +version = "8.3.3" +description = "pytest: simple powerful testing with Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pytest-8.3.3-py3-none-any.whl", hash = "sha256:a6853c7375b2663155079443d2e45de913a911a11d669df02a50814944db57b2"}, + {file = "pytest-8.3.3.tar.gz", hash = "sha256:70b98107bd648308a7952b06e6ca9a50bc660be218d53c257cc1fc94fda10181"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "sys_platform == \"win32\""} +exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""} +iniconfig = "*" +packaging = "*" +pluggy = ">=1.5,<2" +tomli = {version = ">=1", markers = "python_version < \"3.11\""} + +[package.extras] +dev = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"] + +[[package]] +name = "pytz" +version = "2024.2" +description = "World timezone definitions, modern and historical" +optional = false +python-versions = "*" +files = [ + {file = "pytz-2024.2-py2.py3-none-any.whl", hash = "sha256:31c7c1817eb7fae7ca4b8c7ee50c72f93aa2dd863de768e1ef4245d426aa0725"}, + {file = "pytz-2024.2.tar.gz", hash = "sha256:2aa355083c50a0f93fa581709deac0c9ad65cca8a9e9beac660adcbd493c798a"}, +] + +[[package]] +name = "pyyaml" +version = "6.0.2" +description = "YAML parser and emitter for Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"}, + {file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed"}, + {file = 
"PyYAML-6.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180"}, + {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68"}, + {file = "PyYAML-6.0.2-cp310-cp310-win32.whl", hash = "sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99"}, + {file = "PyYAML-6.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e"}, + {file = "PyYAML-6.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774"}, + {file = "PyYAML-6.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85"}, + {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4"}, + {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e"}, + {file = "PyYAML-6.0.2-cp311-cp311-win32.whl", hash = "sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5"}, + {file = "PyYAML-6.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44"}, + {file = "PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab"}, + {file = "PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476"}, + {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48"}, + {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b"}, + {file = "PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4"}, + {file = "PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8"}, + {file = "PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba"}, + {file = "PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1"}, + {file = 
"PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5"}, + {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc"}, + {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652"}, + {file = "PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183"}, + {file = "PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563"}, + {file = "PyYAML-6.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:24471b829b3bf607e04e88d79542a9d48bb037c2267d7927a874e6c205ca7e9a"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7fded462629cfa4b685c5416b949ebad6cec74af5e2d42905d41e257e0869f5"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d84a1718ee396f54f3a086ea0a66d8e552b2ab2017ef8b420e92edbc841c352d"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9056c1ecd25795207ad294bcf39f2db3d845767be0ea6e6a34d856f006006083"}, + {file = "PyYAML-6.0.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:82d09873e40955485746739bcb8b4586983670466c23382c19cffecbf1fd8706"}, + {file = "PyYAML-6.0.2-cp38-cp38-win32.whl", hash = "sha256:43fa96a3ca0d6b1812e01ced1044a003533c47f6ee8aca31724f78e93ccc089a"}, + {file = "PyYAML-6.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:01179a4a8559ab5de078078f37e5c1a30d76bb88519906844fd7bdea1b7729ff"}, + {file = "PyYAML-6.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:688ba32a1cffef67fd2e9398a2efebaea461578b0923624778664cc1c914db5d"}, + {file = "PyYAML-6.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8786accb172bd8afb8be14490a16625cbc387036876ab6ba70912730faf8e1f"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8e03406cac8513435335dbab54c0d385e4a49e4945d2909a581c83647ca0290"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b1fdb9dc17f5a7677423d508ab4f243a726dea51fa5e70992e59a7411c89d19"}, + {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0b69e4ce7a131fe56b7e4d770c67429700908fc0752af059838b1cfb41960e4e"}, + {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a9f8c2e67970f13b16084e04f134610fd1d374bf477b17ec1599185cf611d725"}, + {file = "PyYAML-6.0.2-cp39-cp39-win32.whl", hash = "sha256:6395c297d42274772abc367baaa79683958044e5d3835486c16da75d2a694631"}, + {file = "PyYAML-6.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:39693e1f8320ae4f43943590b49779ffb98acb81f788220ea932a6b6c51004d8"}, + {file = "pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e"}, +] + 
+[[package]] +name = "tomli" +version = "2.0.2" +description = "A lil' TOML parser" +optional = false +python-versions = ">=3.8" +files = [ + {file = "tomli-2.0.2-py3-none-any.whl", hash = "sha256:2ebe24485c53d303f690b0ec092806a085f07af5a5aa1464f3931eec36caaa38"}, + {file = "tomli-2.0.2.tar.gz", hash = "sha256:d46d457a85337051c36524bc5349dd91b1877838e2979ac5ced3e710ed8a60ed"}, +] + +[[package]] +name = "typing-extensions" +version = "4.12.2" +description = "Backported and Experimental Type Hints for Python 3.8+" +optional = false +python-versions = ">=3.8" +files = [ + {file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"}, + {file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"}, +] + +[[package]] +name = "virtualenv" +version = "20.26.6" +description = "Virtual Python Environment builder" +optional = false +python-versions = ">=3.7" +files = [ + {file = "virtualenv-20.26.6-py3-none-any.whl", hash = "sha256:7345cc5b25405607a624d8418154577459c3e0277f5466dd79c49d5e492995f2"}, + {file = "virtualenv-20.26.6.tar.gz", hash = "sha256:280aede09a2a5c317e409a00102e7077c6432c5a38f0ef938e643805a7ad2c48"}, +] + +[package.dependencies] +distlib = ">=0.3.7,<1" +filelock = ">=3.12.2,<4" +platformdirs = ">=3.9.1,<5" + +[package.extras] +docs = ["furo (>=2023.7.26)", "proselint (>=0.13)", "sphinx (>=7.1.2,!=7.3)", "sphinx-argparse (>=0.4)", "sphinxcontrib-towncrier (>=0.2.1a0)", "towncrier (>=23.6)"] +test = ["covdefaults (>=2.3)", "coverage (>=7.2.7)", "coverage-enable-subprocess (>=1)", "flaky (>=3.7)", "packaging (>=23.1)", "pytest (>=7.4)", "pytest-env (>=0.8.2)", "pytest-freezer (>=0.4.8)", "pytest-mock (>=3.11.1)", "pytest-randomly (>=3.12)", "pytest-timeout (>=2.1)", "setuptools (>=68)", "time-machine (>=2.10)"] + +[metadata] +lock-version = "2.0" +python-versions = "^3.9" +content-hash = "63b654a97a3f0c7cebe4798a26d1e709771484fdf521e83af3fac97216bf289f" diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000..bf64751 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,46 @@ +[tool.poetry] +name = "norduniclient" +version = "1.4.4" +description = "Neo4j (>=4.4.12) database client using bolt for NORDUnet network inventory" +authors = [ + "Johan Lundberg ", + "Markus Krogh ", + "Benedith Mulongo ", + ] +readme = "norduniclient-package.md" +homepage = "https://github.com/NORDUnet/python-norduniclient" +repository = "https://github.com/NORDUnet/python-norduniclient" +documentation = "https://github.com/NORDUnet/python-norduniclient" +license = "Apache-2.0" + +[tool.poetry.dependencies] +python = "^3.9" +neo4j = "4.4.12" + + +[tool.poetry.group.dev.dependencies] +black = "^24.10.0" +mypy = "^1.11.2" +pytest = "^8.3.3" +flake8 = "^7.1.1" +nox = "^2024.10.9" +pre-commit = "^4.0.1" + + +[tool.flake8] +max-line-length = 125 +exclude = ['.git', '__pycache__'] +extend-ignore = [ + # PEP 8 recommends to treat : in slices as a binary operator with the lowest priority, and to leave an equal + # amount of space on either side, except if a parameter is omitted (e.g. ham[1 + 1 :]). + # This behaviour may raise E203 whitespace before ':' warnings in style guide enforcement tools like Flake8. + # Since E203 is not PEP 8 compliant, we tell Flake8 to ignore this warning. 
+ # https://black.readthedocs.io/en/stable/the_black_code_style/current_style.html#slices + "F405", + "F403" +] + + +[build-system] +requires = ["poetry-core"] +build-backend = "poetry.core.masonry.api" diff --git a/release_for_neo4j_version.sh b/release_for_neo4j_version.sh new file mode 100755 index 0000000..2ff0232 --- /dev/null +++ b/release_for_neo4j_version.sh @@ -0,0 +1,10 @@ +#! /bin/bash + +set -e + +TAG="neo4j-3.0" + +git tag -d $TAG +git tag $TAG +git push -f origin $TAG + diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/test_core.py b/tests/test_core.py new file mode 100644 index 0000000..7248dbf --- /dev/null +++ b/tests/test_core.py @@ -0,0 +1,421 @@ +# -*- coding: utf-8 -*- + +from __future__ import absolute_import + +try: + from neo4j.exceptions import ConstraintError +except ImportError: + from neo4j.v1.api import CypherError as ConstraintError # Backwards compatability with version <1.2 + +from norduniclient.testing import Neo4jTestCase +from norduniclient import core +from norduniclient import exceptions +from norduniclient import models + +__author__ = 'lundberg' + + +class CoreTests(Neo4jTestCase): + + def setUp(self): + super(CoreTests, self).setUp() + core.create_node(self.neo4jdb, name='Test Node 1', meta_type_label='Logical', + type_label='Test_Node', handle_id='1') + core.create_node(self.neo4jdb, name='Test Node 2', meta_type_label='Logical', + type_label='Test_Node', handle_id='2') + + def test_create_and_get_node(self): + core.create_node(self.neo4jdb, name='Test Node 3', meta_type_label='Logical', + type_label='Test_Node', handle_id='3') + node = core.get_node(self.neo4jdb, handle_id='3') + self.assertEqual(node.get('handle_id'), '3') + + def test_create_node_existing_node_handle(self): + self.assertRaises(ConstraintError, core.create_node, self.neo4jdb, name='Test Node 1', + meta_type_label='Logical', type_label='Test_Node', handle_id='1') + + def test_create_node_bad_meta_type(self): + self.assertRaises(exceptions.MetaLabelNamingError, core.create_node, self.neo4jdb, name='Test Node 1', + meta_type_label='No_Such_Label', type_label='Test_Node', handle_id='1') + + def test_get_node_bundle(self): + node_bundle = core.get_node_bundle(self.neo4jdb, handle_id='1') + self.assertIsInstance(node_bundle, dict) + node_data = node_bundle.get('data') + self.assertEqual(node_data.get('handle_id'), '1') + self.assertEqual(node_bundle.get('meta_type'), 'Logical') + self.assertIsInstance(node_bundle.get('labels'), list) + self.assertIn('Test_Node', node_bundle.get('labels')) + + def test_failing_get_node_bundle(self): + self.assertRaises(exceptions.NodeNotFound, core.get_node_bundle, self.neo4jdb, handle_id='3') + + def test_delete_node(self): + core.delete_node(self.neo4jdb, handle_id='1') + self.assertRaises(exceptions.NodeNotFound, core.get_node, self.neo4jdb, handle_id='1') + + def test_create_and_get_relationship(self): + relationship_id = core._create_relationship(self.neo4jdb, handle_id='1', other_handle_id='2', rel_type='Tests') + self.assertIsInstance(relationship_id, int) + relationship = core.get_relationship_bundle(self.neo4jdb, relationship_id=relationship_id) + self.assertEqual(relationship['id'], relationship_id) + + def test_failing_get_relationship(self): + self.assertRaises(exceptions.RelationshipNotFound, core.get_relationship, self.neo4jdb, relationship_id=1) + + def test_get_relationship_bundle(self): + relationship_id = core._create_relationship(self.neo4jdb, handle_id='1', 
other_handle_id='2', rel_type='Tests') + relationship_bundle = core.get_relationship_bundle(self.neo4jdb, relationship_id=relationship_id) + self.assertIsInstance(relationship_bundle, dict) + relationship = relationship_bundle.get('data') + self.assertIsNotNone(relationship) + self.assertEqual(relationship_bundle.get('id'), relationship_id) + self.assertEqual(relationship_bundle.get('start')['handle_id'], '1') + self.assertEqual(relationship_bundle.get('end')['handle_id'], '2') + self.assertEqual(relationship_bundle.get('type'), 'Tests') + + def test_failing_get_relationship_bundle(self): + self.assertRaises(exceptions.RelationshipNotFound, core.get_relationship_bundle, self.neo4jdb, + relationship_id=1) + + def test_delete_relationship(self): + relationship_id = core._create_relationship(self.neo4jdb, handle_id='1', other_handle_id='2', rel_type='Tests') + relationship = core.get_relationship_bundle(self.neo4jdb, relationship_id=relationship_id) + self.assertEqual(relationship['id'], relationship_id) + core.delete_relationship(self.neo4jdb, relationship_id=relationship_id) + self.assertRaises(exceptions.RelationshipNotFound, core.get_relationship, self.neo4jdb, + relationship_id=relationship_id) + + def test_create_location_relationship(self): + core.create_node(self.neo4jdb, name='Location Node 1', meta_type_label='Location', + type_label='Test_Node', handle_id='3') + core.create_node(self.neo4jdb, name='Location Node 2', meta_type_label='Location', + type_label='Test_Node', handle_id='4') + relationship_id = core.create_location_relationship(self.neo4jdb, location_handle_id='3', other_handle_id='4', + rel_type='Has') + self.assertIsInstance(relationship_id, int) + + def test_failing_create_location_relationship(self): + core.create_node(self.neo4jdb, name='Location Node 1', meta_type_label='Location', + type_label='Test_Node', handle_id='3') + core.create_node(self.neo4jdb, name='Logical Node 2', meta_type_label='Logical', + type_label='Test_Node', handle_id='4') + self.assertRaises(exceptions.NoRelationshipPossible, core.create_location_relationship, self.neo4jdb, + location_handle_id='3', other_handle_id='4', rel_type='Has') + + def test_create_logical_relationship(self): + core.create_node(self.neo4jdb, name='Logical Node 1', meta_type_label='Logical', + type_label='Test_Node', handle_id='3') + core.create_node(self.neo4jdb, name='Physical Node 2', meta_type_label='Physical', + type_label='Test_Node', handle_id='4') + core.create_node(self.neo4jdb, name='Logical Node 2', meta_type_label='Logical', + type_label='Test_Node', handle_id='5') + + relationship_id = core.create_logical_relationship(self.neo4jdb, logical_handle_id='3', other_handle_id='4', + rel_type='Depends_on') + self.assertIsInstance(relationship_id, int) + + relationship_id = core.create_logical_relationship(self.neo4jdb, logical_handle_id='3', other_handle_id='5', + rel_type='Depends_on') + self.assertIsInstance(relationship_id, int) + + relationship_id = core.create_logical_relationship(self.neo4jdb, logical_handle_id='3', other_handle_id='4', + rel_type='Part_of') + self.assertIsInstance(relationship_id, int) + + def test_failing_create_logical_relationship(self): + core.create_node(self.neo4jdb, name='Logical Node 1', meta_type_label='Logical', + type_label='Test_Node', handle_id='3') + core.create_node(self.neo4jdb, name='Physical Node 2', meta_type_label='Physical', + type_label='Test_Node', handle_id='4') + core.create_node(self.neo4jdb, name='Logical Node 2', meta_type_label='Logical', + type_label='Test_Node', 
handle_id='5')
+
+        self.assertRaises(exceptions.NoRelationshipPossible, core.create_logical_relationship, self.neo4jdb,
+                          logical_handle_id='3', other_handle_id='4', rel_type='Has')
+        self.assertRaises(exceptions.NoRelationshipPossible, core.create_logical_relationship, self.neo4jdb,
+                          logical_handle_id='3', other_handle_id='5', rel_type='Part_of')
+        self.assertRaises(exceptions.NoRelationshipPossible, core.create_logical_relationship, self.neo4jdb,
+                          logical_handle_id='3', other_handle_id='5', rel_type='Has')
+
+    def test_create_relation_relationship(self):
+        core.create_node(self.neo4jdb, name='Relation Node 1', meta_type_label='Relation',
+                         type_label='Test_Node', handle_id='3')
+        core.create_node(self.neo4jdb, name='Logical Node 1', meta_type_label='Logical',
+                         type_label='Test_Node', handle_id='4')
+        core.create_node(self.neo4jdb, name='Location Node 1', meta_type_label='Location',
+                         type_label='Test_Node', handle_id='5')
+        core.create_node(self.neo4jdb, name='Physical Node 1', meta_type_label='Physical',
+                         type_label='Test_Node', handle_id='6')
+
+        relationship_id = core.create_relation_relationship(self.neo4jdb, relation_handle_id='3', other_handle_id='4',
+                                                            rel_type='Uses')
+        self.assertIsInstance(relationship_id, int)
+
+        relationship_id = core.create_relation_relationship(self.neo4jdb, relation_handle_id='3', other_handle_id='4',
+                                                            rel_type='Provides')
+        self.assertIsInstance(relationship_id, int)
+
+        relationship_id = core.create_relation_relationship(self.neo4jdb, relation_handle_id='3', other_handle_id='5',
+                                                            rel_type='Responsible_for')
+        self.assertIsInstance(relationship_id, int)
+
+        relationship_id = core.create_relation_relationship(self.neo4jdb, relation_handle_id='3', other_handle_id='6',
+                                                            rel_type='Owns')
+        self.assertIsInstance(relationship_id, int)
+
+        relationship_id = core.create_relation_relationship(self.neo4jdb, relation_handle_id='3', other_handle_id='6',
+                                                            rel_type='Provides')
+        self.assertIsInstance(relationship_id, int)
+
+    def test_failing_create_relation_relationship(self):
+        core.create_node(self.neo4jdb, name='Relation Node 1', meta_type_label='Relation',
+                         type_label='Test_Node', handle_id='3')
+        core.create_node(self.neo4jdb, name='Logical Node 1', meta_type_label='Logical',
+                         type_label='Test_Node', handle_id='4')
+        core.create_node(self.neo4jdb, name='Location Node 1', meta_type_label='Location',
+                         type_label='Test_Node', handle_id='5')
+        core.create_node(self.neo4jdb, name='Physical Node 1', meta_type_label='Physical',
+                         type_label='Test_Node', handle_id='6')
+
+        self.assertRaises(exceptions.NoRelationshipPossible, core.create_relation_relationship, self.neo4jdb,
+                          relation_handle_id='3', other_handle_id='5', rel_type='Uses')
+
+        self.assertRaises(exceptions.NoRelationshipPossible, core.create_relation_relationship, self.neo4jdb,
+                          relation_handle_id='3', other_handle_id='6', rel_type='Responsible_for')
+
+        self.assertRaises(exceptions.NoRelationshipPossible, core.create_relation_relationship,
+                          self.neo4jdb, relation_handle_id='3', other_handle_id='6', rel_type='Responsible_for')
+
+        self.assertRaises(exceptions.NoRelationshipPossible, core.create_relation_relationship, self.neo4jdb,
+                          relation_handle_id='3', other_handle_id='5', rel_type='Owns')
+
+        self.assertRaises(exceptions.NoRelationshipPossible, core.create_relation_relationship, self.neo4jdb,
+                          relation_handle_id='3', other_handle_id='5', rel_type='Provides')
+
+    def test_create_physical_relationship(self):
+        core.create_node(self.neo4jdb, name='Physical Node 1', 
meta_type_label='Physical', + type_label='Test_Node', handle_id='3') + core.create_node(self.neo4jdb, name='Physical Node 2', meta_type_label='Physical', + type_label='Test_Node', handle_id='4') + core.create_node(self.neo4jdb, name='Location Node 1', meta_type_label='Location', + type_label='Test_Node', handle_id='5') + + relationship_id = core.create_physical_relationship(self.neo4jdb, physical_handle_id='3', other_handle_id='4', + rel_type='Has') + self.assertIsInstance(relationship_id, int) + + relationship_id = core.create_physical_relationship(self.neo4jdb, physical_handle_id='3', other_handle_id='4', + rel_type='Connected_to') + self.assertIsInstance(relationship_id, int) + + relationship_id = core.create_physical_relationship(self.neo4jdb, physical_handle_id='3', other_handle_id='5', + rel_type='Located_in') + self.assertIsInstance(relationship_id, int) + + def test_failing_create_physical_relationship(self): + core.create_node(self.neo4jdb, name='Physical Node 1', meta_type_label='Physical', + type_label='Test_Node', handle_id='3') + core.create_node(self.neo4jdb, name='Physical Node 2', meta_type_label='Physical', + type_label='Test_Node', handle_id='4') + core.create_node(self.neo4jdb, name='Location Node 1', meta_type_label='Location', + type_label='Test_Node', handle_id='5') + + self.assertRaises(exceptions.NoRelationshipPossible, core.create_physical_relationship, self.neo4jdb, + physical_handle_id='3', other_handle_id='4', rel_type='Located_in') + + self.assertRaises(exceptions.NoRelationshipPossible, core.create_physical_relationship, self.neo4jdb, + physical_handle_id='3', other_handle_id='4', rel_type='Responsible_for') + + self.assertRaises(exceptions.NoRelationshipPossible, core.create_physical_relationship, + self.neo4jdb, physical_handle_id='3', other_handle_id='5', rel_type='Has') + + def test_create_relationship(self): + core.create_node(self.neo4jdb, name='Location Node 1', meta_type_label='Location', + type_label='Test_Node', handle_id='3') + core.create_node(self.neo4jdb, name='Location Node 2', meta_type_label='Location', + type_label='Test_Node', handle_id='4') + core.create_node(self.neo4jdb, name='Relation Node 1', meta_type_label='Relation', + type_label='Test_Node', handle_id='5') + core.create_node(self.neo4jdb, name='Physical Node 1', meta_type_label='Physical', + type_label='Test_Node', handle_id='6') + + relationship_id = core.create_relationship(self.neo4jdb, handle_id='3', other_handle_id='4', + rel_type='Has') + self.assertIsInstance(relationship_id, int) + + relationship_id = core.create_relationship(self.neo4jdb, handle_id='5', other_handle_id='4', + rel_type='Responsible_for') + self.assertIsInstance(relationship_id, int) + + relationship_id = core.create_relationship(self.neo4jdb, handle_id='6', other_handle_id='4', + rel_type='Located_in') + self.assertIsInstance(relationship_id, int) + + def test_failing_create_relationship(self): + core.create_node(self.neo4jdb, name='Location Node 1', meta_type_label='Location', + type_label='Test_Node', handle_id='3') + core.create_node(self.neo4jdb, name='Location Node 2', meta_type_label='Logical', + type_label='Test_Node', handle_id='4') + self.assertRaises(exceptions.NoRelationshipPossible, core.create_relationship, self.neo4jdb, + handle_id='3', other_handle_id='4', rel_type='Has') + + def test_get_relationships(self): + relationship_id = core.create_relationship(self.neo4jdb, handle_id='1', other_handle_id='2', + rel_type='Depends_on') + + relationships = core.get_relationships(self.neo4jdb, 
handle_id1='1', handle_id2='2') + self.assertIn(relationship_id, [r.id for r in relationships]) + + relationships = core.get_relationships(self.neo4jdb, handle_id1='1', handle_id2='2', rel_type='Depends_on') + self.assertIn(relationship_id, [r.id for r in relationships]) + + # No relationship + core.create_node(self.neo4jdb, name='Location Node 1', meta_type_label='Location', + type_label='Test_Node', handle_id='3') + relationships = core.get_relationships(self.neo4jdb, handle_id1='1', handle_id2='3') + self.assertEqual(relationships, []) + + def test_set_node_properties(self): + new_properties = {'test': 'hello world'} + core.set_node_properties(self.neo4jdb, handle_id='1', new_properties=new_properties) + node = core.get_node(self.neo4jdb, handle_id='1') + new_properties.update({'handle_id': '1'}) + self.assertEqual(node['test'], new_properties['test']) + +# def test_fail_set_node_properties(self): +# new_properties = {'test': set([])} +# self.assertRaises(exceptions.BadProperties, core.set_node_properties, self.neo4jdb, +# handle_id='1', new_properties=new_properties) + + def test_set_relationship_properties(self): + relationship_id = core.create_relationship(self.neo4jdb, handle_id='1', other_handle_id='2', + rel_type='Depends_on') + new_properties = {'test': 'hello world'} + core.set_relationship_properties(self.neo4jdb, relationship_id=relationship_id, new_properties=new_properties) + relationship = core.get_relationship(self.neo4jdb, relationship_id=relationship_id) + self.assertEqual(relationship['test'], new_properties['test']) + +# def test_fail_set_relationship_properties(self): +# relationship_id = core.create_relationship(self.neo4jdb, handle_id='1', other_handle_id='2', +# rel_type='Depends_on') +# new_properties = {'test': set([])} +# self.assertRaises(exceptions.BadProperties, core.set_relationship_properties, self.neo4jdb, +# relationship_id=relationship_id, new_properties=new_properties) + + def test_get_node_model(self): + node_model = core.get_node_model(self.neo4jdb, handle_id='1') + self.assertIsInstance(node_model, models.LogicalModel) + + def test_get_relationship_model(self): + relationship_id = core.create_relationship(self.neo4jdb, handle_id='1', other_handle_id='2', + rel_type='Depends_on') + relationship_model = core.get_relationship_model(self.neo4jdb, relationship_id=relationship_id) + self.assertIsInstance(relationship_model, models.BaseRelationshipModel) + + def test_get_nodes_by_value_and_property(self): + new_properties = {'test': 'hello world'} + core.set_node_properties(self.neo4jdb, handle_id='1', new_properties=new_properties) + result = core.get_nodes_by_value(self.neo4jdb, value='hello world', prop='test') + + all_results = [r for r in result] + self.assertEqual(len(all_results), 1) + node = all_results[0] + self.assertEqual(node.get('test'), 'hello world') + + def test_get_nodes_by_value_and_property_list(self): + new_properties = {'test': ['hello', 'world']} + core.set_node_properties(self.neo4jdb, handle_id='1', new_properties=new_properties) + result = core.get_nodes_by_value(self.neo4jdb, value=['hello', 'world'], prop='test') + + all_results = [r for r in result] + self.assertEqual(len(all_results), 1) + node = all_results[0] + self.assertEqual(node.get('test'), ['hello', 'world']) + + def test_get_nodes_by_value_and_property_bool(self): + new_properties = {'test': False} + core.set_node_properties(self.neo4jdb, handle_id='1', new_properties=new_properties) + result = core.get_nodes_by_value(self.neo4jdb, value=False, prop='test') + + 
all_results = [r for r in result] + self.assertEqual(len(all_results), 1) + node = all_results[0] + self.assertEqual(node.get('test'), False) + + def test_get_nodes_by_value_and_property_int(self): + new_properties = {'test': 3} + core.set_node_properties(self.neo4jdb, handle_id='1', new_properties=new_properties) + result = core.get_nodes_by_value(self.neo4jdb, value=3, prop='test') + + all_results = [r for r in result] + self.assertEqual(len(all_results), 1) + node = all_results[0] + self.assertEqual(node.get('test'), 3) + + def test_search_nodes_by_value(self): + new_properties = {'test': 'hello world'} + core.set_node_properties(self.neo4jdb, handle_id='1', new_properties=new_properties) + + result = core.search_nodes_by_value(self.neo4jdb, value='world') + + all_results = [r for r in result] + self.assertEqual(len(all_results), 1) + node = all_results[0] + self.assertEqual(node.get('test'), 'hello world') + + def test_search_nodes_by_value_and_property(self): + new_properties = {'test': 'hello world'} + core.set_node_properties(self.neo4jdb, handle_id='1', new_properties=new_properties) + result = core.search_nodes_by_value(self.neo4jdb, value='world', prop='test') + + all_results = [r for r in result] + self.assertEqual(len(all_results), 1) + node = all_results[0] + self.assertEqual(node.get('test'), 'hello world') + + def test_search_nodes_by_value_in_list(self): + new_properties = {'test': ['hello', 'world']} + core.set_node_properties(self.neo4jdb, handle_id='1', new_properties=new_properties) + + result = core.search_nodes_by_value(self.neo4jdb, value='hel') + + all_results = [r for r in result] + self.assertEqual(len(all_results), 1) + node = all_results[0] + self.assertEqual(node.get('test'), ['hello', 'world']) + + def test_search_nodes_by_value_and_property_in_list(self): + new_properties = {'test': ['hello', 'world']} + core.set_node_properties(self.neo4jdb, handle_id='1', new_properties=new_properties) + result = core.search_nodes_by_value(self.neo4jdb, value='hel', prop='test') + + all_results = [r for r in result] + self.assertEqual(len(all_results), 1) + node = all_results[0] + self.assertEqual(node.get('test'), ['hello', 'world']) + + def test_get_nodes_by_type(self): + result = core.get_nodes_by_type(self.neo4jdb, 'Test_Node') + + for node in result: + self.assertIn('Test_Node', node.labels) + + def test_get_nodes_by_name(self): + result = core.get_nodes_by_name(self.neo4jdb, 'Test Node 1') + + all_results = [r for r in result] + self.assertEqual(len(all_results), 1) + node = all_results[0] + self.assertEqual(node['name'], 'Test Node 1') + + def test_get_unique_node_by_name(self): + node_model = core.get_unique_node_by_name(self.neo4jdb, node_name='Test Node 1', node_type='Test_Node') + self.assertIsInstance(node_model, models.LogicalModel) + + def test_failing_get_unique_node_by_name(self): + core.create_node(self.neo4jdb, name='Test Node 1', meta_type_label='Logical', + type_label='Test_Node', handle_id='3') + self.assertRaises(exceptions.MultipleNodesReturned, core.get_unique_node_by_name, self.neo4jdb, + node_name='Test Node 1', node_type='Test_Node') + diff --git a/tests/test_helpers.py b/tests/test_helpers.py new file mode 100644 index 0000000..8ce4f11 --- /dev/null +++ b/tests/test_helpers.py @@ -0,0 +1,34 @@ +# -*- coding: utf-8 -*- + +from __future__ import absolute_import + +import unittest +from norduniclient import helpers + +__author__ = 'lundberg' + + +class HelpersTests(unittest.TestCase): + + def test_update_item_properties(self): + initial_props = { + 
'string': 'hello world', + 'delete_me': 'byebye', + 'list': ['hello', 'world'], + 'int': 3 + } + update_props = { + 'string': 'hola el mundo', + 'delete_me': '', + 'list': ['hello'], + 'int': 0 + } + new_props = helpers.update_item_properties(initial_props, update_props) + expected_props = { + 'string': 'hola el mundo', + 'list': ['hello'], + 'int': 0 + } + self.assertEqual(new_props, expected_props) + + diff --git a/tests/test_models.py b/tests/test_models.py new file mode 100644 index 0000000..fb9235a --- /dev/null +++ b/tests/test_models.py @@ -0,0 +1,726 @@ +# -*- coding: utf-8 -*- + +from __future__ import absolute_import + +from norduniclient.testing import Neo4jTestCase +from norduniclient import core +from norduniclient import exceptions +from norduniclient import models + +__author__ = 'lundberg' + + +class ModelsTests(Neo4jTestCase): + + def setUp(self): + super(ModelsTests, self).setUp() + q1 = """ + // Create nodes + CREATE (router1:Node:Physical:Router{name:'Router1', handle_id:'1'}), + (port1:Node:Physical:Port{name:'Port1', handle_id:'2'}), + (unit1:Node:Logical:Unit{name:'Unit1', handle_id:'3'}), + (port6:Node:Physical:Port{name:'Port6', handle_id:'4'}), + (unit2:Node:Logical:Unit{name:'Unit2', handle_id:'5'}), + (provider1:Node:Relation:Provider{name:'Provider1', handle_id:'6'}), + (peering_group1:Node:Logical:Peering_Group{name:'Peering Group1', handle_id:'7'}), + (peering_partner1:Node:Relation:Peering_Partner{name:'Peering Partner1', handle_id:'8'}), + (service2:Node:Logical:Service{name:'Service2', handle_id:'9'}), + (service3:Node:Logical:Service{name:'Service3', handle_id:'10'}), + (site1:Node:Location:Site{name:'Site1', handle_id:'11'}), + (rack1:Node:Location:Rack{name:'Rack1', handle_id:'12'}), + (optical_node1:Node:Physical:Optical_Node{name:'Optical Node1', handle_id:'13'}), + (port2:Node:Physical:Port{name:'Port2', handle_id:'14', description:'This is a port'}), + (rack2:Node:Location:Rack{name:'Rack2', handle_id:'15'}), + (optical_node2:Node:Physical:Optical_Node{name:'Optical Node2', handle_id:'16'}), + (port3:Node:Physical:Port{name:'Port3', handle_id:'17'}), + (site2:Node:Location:Site{name:'Site2', handle_id:'18'}), + (rack3:Node:Location:Rack{name:'Rack3', handle_id:'19'}), + (optical_path1:Node:Logical:Optical_Path{name:'Optical Path1', handle_id:'20'}), + (optical_link1:Node:Logical:Optical_Link{name:'Optical Link1', handle_id:'21'}), + (optical_link2:Node:Logical:Optical_Link{name:'Optical Link2', handle_id:'22'}), + (odf1:Node:Physical:ODF{name:'ODF1', handle_id:'23'}), + (port4:Node:Physical:Port{name:'Port4', handle_id:'24'}), + (odf2:Node:Physical:ODF{name:'ODF2', handle_id:'25'}), + (port5:Node:Physical:Port{name:'Port5', handle_id:'26'}), + (port7:Node:Physical:Port{name:'Port7', handle_id:'27'}), + (cable1:Node:Physical:Cable{name:'Cable1', handle_id:'28'}), + (cable2:Node:Physical:Cable{name:'Cable2', handle_id:'29'}), + (cable3:Node:Physical:Cable{name:'Cable3', handle_id:'30'}), + (cable4:Node:Physical:Cable{name:'Cable4', handle_id:'31'}), + (host1:Node:Physical:Host{name:'Host1', handle_id:'32'}), + (host2:Node:Logical:Host{name:'Host2', handle_id:'33'}), + (customer1:Node:Relation:Customer{name:'Customer1', handle_id:'34'}), + (customer2:Node:Relation:Customer{name:'Customer2', handle_id:'35'}), + (customer3:Node:Relation:Customer{name:'Customer3', handle_id:'36'}), + (customer4:Node:Relation:Customer{name:'Customer4', handle_id:'37'}), + (service4:Node:Logical:Service{name:'Service4', handle_id:'38'}), + 
(provider2:Node:Relation:Provider{name:'Provider2', handle_id:'39'}), + (port8:Node:Physical:Port{name:'Port8', handle_id:'40'}), + (rack4:Node:Location:Rack{name:'Rack4', handle_id:'41'}), + (cable5:Node:Physical:Cable{name:'Cable5', handle_id:'42'}), + (peering_group2:Node:Logical:Peering_Group{name:'Peering Group2', handle_id:'44'}), + (cable6:Node:Physical:Cable{name:'Cable6', handle_id:'45'}), + (service5:Node:Logical:Service{name:'Service5', handle_id:'46'}), + (external_equipment1:Node:Physical:External_Equipment{name:'External Equipment1', handle_id:'47'}), + + // Create relationships + (router1)-[:Has]->(port1), + (unit1)-[:Part_of]->(port1), + (router1)-[:Has]->(port6), + (unit2)-[:Part_of]->(port6), + (provider1)-[:Owns]->(router1), + (provider1)-[:Provides]->(peering_group1), + (peering_partner1)-[:Uses {ip_address:'127.0.0.1'}]->(peering_group1), + (peering_group1)-[:Depends_on]->(unit1), + (site1)-[:Has]->(rack1), + (router1)-[:Located_in]->(rack1), + (provider1)-[:Responsible_for]->(rack1), + (optical_node1)-[:Has]->(port2), + (site1)-[:Has]->(rack2), + (optical_node1)-[:Located_in]->(rack2), + (optical_node2)-[:Has]->(port3), + (site2)-[:Has]->(rack3), + (optical_node2)-[:Located_in]->(rack3), + (provider1)-[:Provides]->(optical_path1), + (service2)-[:Depends_on]->(optical_path1), + (service3)-[:Depends_on]->(unit2), + (odf1)-[:Located_in]->(rack2), + (odf1)-[:Has]->(port4), + (odf2)-[:Located_in]->(rack3), + (odf2)-[:Has]->(port5), + (odf2)-[:Has]->(port7), + (port4)<-[:Connected_to]-(cable1)-[:Connected_to]->(port2), + (port5)<-[:Connected_to]-(cable2)-[:Connected_to]->(port3), + (port4)<-[:Connected_to]-(cable3)-[:Connected_to]->(port5), + (port6)<-[:Connected_to]-(cable4)-[:Connected_to]->(port7), + (port7)<-[:Connected_to]-(cable5), + (optical_link1)-[:Depends_on]->(port2), + (optical_link2)-[:Depends_on]->(port3), + (optical_link1)-[:Depends_on]->(port4), + (optical_link2)-[:Depends_on]->(port5), + (optical_path1)-[:Depends_on]->(port4), + (optical_path1)-[:Depends_on]->(port5), + (optical_path1)-[:Depends_on]->(optical_link1), + (optical_path1)-[:Depends_on]->(optical_link2), + (provider1)-[:Owns]->(host1), + (host2)-[:Depends_on]->(host1), + (customer1)-[:Uses]->(host2), + (customer2)-[:Uses]->(service2), + (customer2)-[:Uses]->(service3), + (customer3)-[:Uses]->(service3), + (service5)-[:Depends_on]->(external_equipment1) + """ + + q2 = """ + // Create nodes + CREATE (physical1:Node:Physical:Generic{name:'Physical1', handle_id:'101'}), + (physical2:Node:Physical:Generic{name:'Physical2', handle_id:'102', description:'This is a port'}), + (logical1:Node:Logical:Generic{name:'Logical1', handle_id:'103'}), + (physical3:Node:Physical:Generic{name:'Physical3', handle_id:'104'}), + (logical2:Node:Logical:Generic{name:'Logical2', handle_id:'105'}), + (relation1:Node:Relation:Generic{name:'Relation1', handle_id:'106'}), + (logical3:Node:Logical:Generic{name:'Logical3', handle_id:'107'}), + (relation2:Node:Relation:Generic{name:'Relation2', handle_id:'108'}), + (location1:Node:Location:Generic{name:'Location1', handle_id:'109'}), + (location2:Node:Location:Generic{name:'Location2', handle_id:'110'}), + (logical4:Node:Logical:Generic{name:'Logical4', handle_id:'111'}), + (physical4:Node:Physical:Generic{name:'Physical4', handle_id:'112', description:'This is a cable'}), + + // Create relationships + (physical1)-[:Has]->(physical2), + (logical1)-[:Part_of]->(physical2), + (physical1)-[:Has]->(physical3), + (logical2)-[:Part_of]->(physical3), + 
(relation1)-[:Owns]->(physical1), + (relation1)-[:Provides]->(logical3), + (relation2)-[:Uses]->(logical3), + (logical3)-[:Depends_on]->(logical1), + (location1)-[:Has]->(location2), + (physical1)-[:Located_in]->(location2), + (relation1)-[:Responsible_for]->(location2), + (logical4)-[:Depends_on]->(logical3), + (physical2)<-[:Connected_to]-(physical4)-[:Connected_to]->(physical3) + """ + + # Insert mocked network + with self.neo4jdb.session as s: + s.run(q1) + + # Insert generic models + with self.neo4jdb.session as s: + s.run(q2) + + def test_base_node_model(self): + node_model_1 = core.get_node_model(self.neo4jdb, handle_id='101') + node_model_2 = core.get_node_model(self.neo4jdb, handle_id='102') + + self.assertIsNotNone(str(node_model_1)) + self.assertIsNotNone(repr(node_model_1)) + + self.assertEqual(node_model_1, node_model_1) + self.assertGreater(node_model_2, node_model_1) + self.assertLess(node_model_1, node_model_2) + + self.assertEqual(node_model_1.handle_id, '101') + self.assertIn(node_model_1.meta_type, core.META_TYPES) + self.assertIsInstance(node_model_1.labels, list) + self.assertIsNotNone(node_model_1.data) + self.assertIsInstance(node_model_1.incoming, dict) + self.assertIsInstance(node_model_1.outgoing, dict) + self.assertIsInstance(node_model_1.relationships, dict) + + def test_add_label(self): + node_model_1 = core.get_node_model(self.neo4jdb, handle_id='101') + initial_labels = node_model_1.labels + node_model_1.add_label('Test_Label') + node_model_1 = node_model_1.reload() + new_labels = node_model_1.labels + initial_labels.append('Test_Label') + self.assertEqual(sorted(new_labels), sorted(initial_labels)) + + def test_remove_label(self): + node_model_1 = core.get_node_model(self.neo4jdb, handle_id='101') + initial_labels = node_model_1.labels + node_model_1 = node_model_1.add_label('Test_Label') + new_labels = node_model_1.labels + expected_labels = initial_labels + ['Test_Label'] + self.assertEqual(sorted(new_labels), sorted(expected_labels)) + node_model_1 = node_model_1.remove_label('Test_Label') + new_labels = node_model_1.labels + self.assertEqual(sorted(new_labels), sorted(initial_labels)) + + def test_change_meta_type(self): + node_model_1 = core.get_node_model(self.neo4jdb, handle_id='101') + self.assertEqual(node_model_1.meta_type, 'Physical') + node_model_1 = node_model_1.change_meta_type('Logical') + self.assertEqual(node_model_1.meta_type, 'Logical') + + def test_switch_type(self): + node_model_1 = core.get_node_model(self.neo4jdb, handle_id='101') + self.assertIn('Generic', node_model_1.labels) + node_model_1 = node_model_1.switch_type(old_type='Generic', new_type='New_Type') + self.assertNotIn('Generic', node_model_1.labels) + self.assertIn('New_Type', node_model_1.labels) + + def test_delete(self): + node_model_1 = core.get_node_model(self.neo4jdb, handle_id='101') + node_model_1.delete() + self.assertRaises(exceptions.NodeNotFound, core.get_node_model, self.neo4jdb, handle_id='101') + + def test_base_relationship_model(self): + node_model_1 = core.get_node_model(self.neo4jdb, handle_id='101') + outgoing_relationships = node_model_1.outgoing + self.assertGreater(len(outgoing_relationships), 0) + + for rel_type, relationships in outgoing_relationships.items(): + self.assertIsNotNone(rel_type) + for item in relationships: + relationship_model = core.get_relationship_model(self.neo4jdb, item['relationship_id']) + self.assertIsNotNone(str(relationship_model)) + self.assertIsNotNone(repr(relationship_model)) + self.assertIsNotNone(relationship_model.type) 
+ self.assertIsInstance(relationship_model.id, int) + self.assertIsNotNone(relationship_model.data) + self.assertEqual(relationship_model.start['handle_id'], node_model_1.handle_id) + self.assertEqual(relationship_model.end['handle_id'], item['node'].handle_id) + + def test_get_location_path(self): + # Model with location + physical1 = core.get_node_model(self.neo4jdb, handle_id='101') + location_path = physical1.get_location_path() + self.assertEqual(location_path['location_path'][0]['name'], 'Location1') + self.assertEqual(location_path['location_path'][1]['name'], 'Location2') + + # Model without location + relation1 = core.get_node_model(self.neo4jdb, handle_id='106') + location_path = relation1.get_location_path() + self.assertEqual(location_path['location_path'], []) + + def test_get_location(self): + # Model with location + physical1 = core.get_node_model(self.neo4jdb, handle_id='101') + location = physical1.get_location() + self.assertIsInstance(location['Located_in'][0]['node'], models.LocationModel) + self.assertEqual(location['Located_in'][0]['node'].data['name'], 'Location2') + self.assertIsInstance(location['Located_in'][0]['relationship_id'], int) + + # Model without location + relation1 = core.get_node_model(self.neo4jdb, handle_id='106') + location = relation1.get_location() + self.assertIsNone(location.get('Located_in')) + + def test_get_placement_path(self): + # Models with placement path + physical2 = core.get_node_model(self.neo4jdb, handle_id='102') + placement_path = physical2.get_placement_path() + self.assertEqual(placement_path['placement_path'][0]['name'], 'Physical1') + + # Model without placement path + relation1 = core.get_node_model(self.neo4jdb, handle_id='106') + location_path = relation1.get_placement_path() + self.assertEqual(location_path['placement_path'], []) + + def test_get_child_form_data(self): + physical1 = core.get_node_model(self.neo4jdb, handle_id='101') + child_form_data = physical1.get_child_form_data(node_type='Generic') + for data in child_form_data: + self.assertIn(data['handle_id'], ['102', '104']) + self.assertIn(data['name'], ['Physical2', 'Physical3']) + self.assertIn(data['description'], ['This is a port', None]) + self.assertEqual(data['labels'], [u'Node', u'Physical', u'Generic']) + + def test_get_relations(self): + physical1 = core.get_node_model(self.neo4jdb, handle_id='101') + relations = physical1.get_relations() + self.assertEqual(physical1.meta_type, 'Physical') + self.assertIsInstance(relations['Owns'][0]['node'], models.RelationModel) + + logical3 = core.get_node_model(self.neo4jdb, handle_id='107') + relations = logical3.get_relations() + self.assertEqual(logical3.meta_type, 'Logical') + self.assertIsInstance(relations['Uses'][0]['node'], models.RelationModel) + + logical3 = core.get_node_model(self.neo4jdb, handle_id='107') + relations = logical3.get_relations() + self.assertIsInstance(relations['Provides'][0]['node'], models.RelationModel) + + location2 = core.get_node_model(self.neo4jdb, handle_id='110') + relations = location2.get_relations() + self.assertIsInstance(relations['Responsible_for'][0]['node'], models.RelationModel) + + def test_get_dependencies(self): + logical3 = core.get_node_model(self.neo4jdb, handle_id='107') + dependencies = logical3.get_dependencies() + self.assertEqual(len(dependencies['Depends_on']), 1) + self.assertEqual(dependencies['Depends_on'][0]['node'].handle_id, '103') + self.assertIsInstance(dependencies['Depends_on'][0]['node'], models.LogicalModel) + + def test_get_dependents(self): + 
logical1 = core.get_node_model(self.neo4jdb, handle_id='103') + dependents = logical1.get_dependents() + self.assertEqual(len(dependents['Depends_on']), 1) + self.assertEqual(dependents['Depends_on'][0]['node'].handle_id, '107') + self.assertIsInstance(dependents['Depends_on'][0]['node'], models.LogicalModel) + + def test_get_dependent_as_types(self): + logical1 = core.get_node_model(self.neo4jdb, handle_id='103') + dependents = logical1.get_dependent_as_types() + self.assertEqual(dependents['direct'][0]['name'], 'Logical3') + self.assertEqual(dependents['links'], []) + self.assertEqual(dependents['oms'], []) + self.assertEqual(dependents['paths'], []) + self.assertEqual(dependents['services'], []) + + def test_get_dependent_as_types_port_with_unit_services(self): + port6 = core.get_node_model(self.neo4jdb, handle_id='4') + dependent = port6.get_dependent_as_types() + self.assertEqual(dependent['direct'], []) + self.assertEqual(dependent['links'], []) + self.assertEqual(dependent['oms'], []) + self.assertEqual(dependent['paths'], []) + self.assertEqual(len(dependent['services']), 1) + self.assertEqual(dependent['services'][0]['name'], 'Service3') + + def test_get_dependent_as_types_equipment(self): + external1 = core.get_node_model(self.neo4jdb, handle_id='47') + # a bit nasty just moving a port + external1.set_has('24') # port4 + dependents = external1.get_dependent_as_types() + self.assertIn('Service5', [n['name'] for n in dependents['direct']]) + self.assertIn('Optical Link1', [n['name'] for n in dependents['links']]) + self.assertIn('Optical Link2', [n['name'] for n in dependents['links']]) + self.assertEqual(dependents['oms'], []) + self.assertIn('Optical Path1', [n['name'] for n in dependents['paths']]) + self.assertIn('Service2', [n['name'] for n in dependents['services']]) + self.assertIn('Service5', [n['name'] for n in dependents['services']]) + + def test_get_dependent_as_types_equipment_only_direct(self): + external1 = core.get_node_model(self.neo4jdb, handle_id='47') + dependents = external1.get_dependent_as_types() + self.assertEqual(dependents['direct'][0]['name'], 'Service5') + self.assertEqual(dependents['links'], []) + self.assertEqual(dependents['oms'], []) + self.assertEqual(dependents['paths'], []) + self.assertEqual(dependents['services'][0]['name'], 'Service5') + + def test_get_dependencies_as_types(self): + logical4 = core.get_node_model(self.neo4jdb, handle_id='111') + dependencies = logical4.get_dependencies_as_types() + self.assertEqual(dependencies['direct'][0]['name'], 'Logical3') + self.assertEqual(dependencies['links'], []) + self.assertEqual(dependencies['oms'], []) + self.assertEqual(dependencies['paths'], []) + self.assertEqual(dependencies['services'], []) + + def test_get_ports(self): + physical4 = core.get_node_model(self.neo4jdb, handle_id='112') + ports = physical4.get_ports() + self.assertIsInstance(ports, list) + self.assertEqual(len(ports), 0) + + def test_get_part_of_logical_model(self): + unit1 = core.get_node_model(self.neo4jdb, handle_id='3') + part_of = unit1.get_part_of() + self.assertEqual(part_of['Part_of'][0]['node'].handle_id, '2') + + def test_set_user_logical_model(self): + customer4 = core.get_node_model(self.neo4jdb, handle_id='37') + service4 = core.get_node_model(self.neo4jdb, handle_id='38') + + result = service4.set_user(customer4.handle_id) + self.assertEqual(result['Uses'][0]['created'], True) + relations = service4.get_relations() + self.assertEqual(len(relations['Uses']), 1) + 
self.assertEqual(relations['Uses'][0]['node'].handle_id, customer4.handle_id) + + # Do not accept duplicates + result = service4.set_user(customer4.handle_id) + self.assertEqual(result['Uses'][0]['created'], False) + relations = service4.get_relations() + self.assertEqual(len(relations['Uses']), 1) + + def test_set_provider_logical_model(self): + provider_1 = core.get_node_model(self.neo4jdb, handle_id='6') + service4 = core.get_node_model(self.neo4jdb, handle_id='38') + + result = service4.set_provider(provider_1.handle_id) + self.assertEqual(result['Provides'][0]['created'], True) + relations = service4.get_relations() + self.assertEqual(len(relations['Provides']), 1) + self.assertEqual(relations['Provides'][0]['node'].handle_id, provider_1.handle_id) + + # Do not accept duplicates + result = service4.set_provider(provider_1.handle_id) + self.assertEqual(result['Provides'][0]['created'], False) + relations = service4.get_relations() + self.assertEqual(len(relations['Provides']), 1) + + def test_set_dependency_logical_model(self): + optical_path1 = core.get_node_model(self.neo4jdb, handle_id='20') + service4 = core.get_node_model(self.neo4jdb, handle_id='38') + + result = service4.set_dependency(optical_path1.handle_id) + self.assertEqual(result['Depends_on'][0]['created'], True) + relations = service4.get_dependencies() + self.assertEqual(len(relations['Depends_on']), 1) + self.assertEqual(relations['Depends_on'][0]['node'].handle_id, optical_path1.handle_id) + + # Do not accept duplicates + result = service4.set_dependency(optical_path1.handle_id) + self.assertEqual(result['Depends_on'][0]['created'], False) + relations = service4.get_dependencies() + self.assertEqual(len(relations['Depends_on']), 1) + + def test_get_location_physical_model(self): + router1 = core.get_node_model(self.neo4jdb, handle_id='1') + location = router1.get_location() + self.assertIsInstance(location['Located_in'][0]['node'], models.LocationModel) + self.assertEqual(location['Located_in'][0]['node'].data['name'], 'Rack1') + self.assertIsInstance(location['Located_in'][0]['relationship_id'], int) + + def test_set_owner_physical_model(self): + router1 = core.get_node_model(self.neo4jdb, handle_id='1') + customer4 = core.get_node_model(self.neo4jdb, handle_id='37') + + result = router1.set_owner(customer4.handle_id) + self.assertEqual(result['Owns'][0]['created'], True) + relations = router1.get_relations() + self.assertEqual(len(relations['Owns']), 2) + + # Do not accept duplicates + result = router1.set_owner(customer4.handle_id) + self.assertEqual(result['Owns'][0]['created'], False) + relations = router1.get_relations() + self.assertEqual(len(relations['Owns']), 2) + + def test_set_provider_physical_model(self): + router1 = core.get_node_model(self.neo4jdb, handle_id='1') + provider_2 = core.get_node_model(self.neo4jdb, handle_id='39') + + result = router1.set_provider(provider_2.handle_id) + self.assertEqual(result['Provides'][0]['created'], True) + relations = router1.get_relations() + self.assertEqual(len(relations['Provides']), 1) + self.assertEqual(relations['Provides'][0]['node'].handle_id, provider_2.handle_id) + + # Do not accept duplicates + result = router1.set_provider(provider_2.handle_id) + self.assertEqual(result['Provides'][0]['created'], False) + relations = router1.get_relations() + self.assertEqual(len(relations['Provides']), 1) + + def test_set_location_physical_model(self): + router1 = core.get_node_model(self.neo4jdb, handle_id='1') + rack_2 = core.get_node_model(self.neo4jdb, 
handle_id='15') + + result = router1.set_location(rack_2.handle_id) + self.assertEqual(result['Located_in'][0]['created'], True) + location = router1.get_location() + self.assertEqual(len(location['Located_in']), 2) + + # Do not accept duplicates + result = router1.set_location(rack_2.handle_id) + self.assertEqual(result['Located_in'][0]['created'], False) + location = router1.get_location() + self.assertEqual(len(location['Located_in']), 2) + + def test_set_and_get_has_physical_model(self): + router1 = core.get_node_model(self.neo4jdb, handle_id='1') + port8 = core.get_node_model(self.neo4jdb, handle_id='40') + + result = router1.set_has(port8.handle_id) + self.assertEqual(result['Has'][0]['created'], True) + children = router1.get_has() + self.assertEqual(len(children['Has']), 3) + + # Do not accept duplicates + result = router1.set_has(port8.handle_id) + self.assertEqual(result['Has'][0]['created'], False) + children = router1.get_has() + self.assertEqual(len(children['Has']), 3) + + def test_set_and_get_part_of_physical_model(self): + port8 = core.get_node_model(self.neo4jdb, handle_id='40') + unit1 = core.get_node_model(self.neo4jdb, handle_id='3') + + result = port8.set_part_of(unit1.handle_id) + self.assertEqual(result['Part_of'][0]['created'], True) + children = port8.get_part_of() + self.assertEqual(len(children['Part_of']), 1) + + # Do not accept duplicates + result = port8.set_part_of(unit1.handle_id) + self.assertEqual(result['Part_of'][0]['created'], False) + children = port8.get_part_of() + self.assertEqual(len(children['Part_of']), 1) + + def test_get_parent_physical_model(self): + port1 = core.get_node_model(self.neo4jdb, handle_id='2') + parent = port1.get_parent() + self.assertIsInstance(parent['Has'][0]['node'], models.PhysicalModel) + self.assertEqual(parent['Has'][0]['node'].data['name'], 'Router1') + self.assertIsInstance(parent['Has'][0]['relationship_id'], int) + + def test_get_location_path_location_model(self): + rack_2 = core.get_node_model(self.neo4jdb, handle_id='15') + location_path = rack_2.get_location_path() + self.assertEqual(location_path['location_path'][0]['name'], 'Site1') + + def test_get_parent_location_model(self): + rack_2 = core.get_node_model(self.neo4jdb, handle_id='15') + parent = rack_2.get_parent() + self.assertEqual(parent['Has'][0]['node'].data['name'], 'Site1') + + def test_get_located_in_location_model(self): + rack_2 = core.get_node_model(self.neo4jdb, handle_id='15') + located_in = rack_2.get_located_in() + self.assertEqual(len(located_in['Located_in']), 2) + optical_node = [node for node in located_in['Located_in'] if node['node'].data['name'] == 'Optical Node1'][0] + self.assertIsInstance(optical_node['node'], models.PhysicalModel) + self.assertIsInstance(optical_node['relationship_id'], int) + + def test_set_and_get_has_location_model(self): + site1 = core.get_node_model(self.neo4jdb, handle_id='11') + rack_4 = core.get_node_model(self.neo4jdb, handle_id='41') + + result = site1.set_has(rack_4.handle_id) + self.assertEqual(result['Has'][0]['created'], True) + children = site1.get_has() + self.assertEqual(len(children['Has']), 3) + + # Do not accept duplicates + result = site1.set_has(rack_4.handle_id) + self.assertEqual(result['Has'][0]['created'], False) + children = site1.get_has() + self.assertEqual(len(children['Has']), 3) + + def test_set_responsible_for_location_model(self): + rack_4 = core.get_node_model(self.neo4jdb, handle_id='41') + provider_2 = core.get_node_model(self.neo4jdb, handle_id='39') + + result = 
rack_4.set_responsible_for(provider_2.handle_id) + self.assertEqual(result['Responsible_for'][0]['created'], True) + relations = rack_4.get_relations() + self.assertEqual(len(relations['Responsible_for']), 1) + self.assertEqual(relations['Responsible_for'][0]['node'].handle_id, provider_2.handle_id) + + # Do not accept duplicates + result = rack_4.set_responsible_for(provider_2.handle_id) + self.assertEqual(result['Responsible_for'][0]['created'], False) + relations = rack_4.get_relations() + self.assertEqual(len(relations['Responsible_for']), 1) + + # TODO: EquipmentModel get_ports should probably work as CommonQueries get_ports + def test_get_ports_equipment_model(self): + odf1 = core.get_node_model(self.neo4jdb, handle_id='23') + ports = odf1.get_ports() + self.assertIsInstance(ports, dict) + self.assertEqual(len(ports['Has']), 1) + for rel_type, items in ports.items(): + self.assertEqual(len(items), 1) + + def test_get_port_equipment_model(self): + router1 = core.get_node_model(self.neo4jdb, handle_id='1') + ports = router1.get_port('Port1') + self.assertEqual(len(ports['Has']), 1) + self.assertIsInstance(ports['Has'][0]['node'], models.PortModel) + self.assertEqual(ports['Has'][0]['node'].data['name'], 'Port1') + + def test_get_dependent_as_types_equipment_model(self): + optical_node2 = core.get_node_model(self.neo4jdb, handle_id='16') + dependents = optical_node2.get_dependent_as_types() + self.assertEqual(dependents['direct'], []) + self.assertEqual(dependents['links'][0]['name'], 'Optical Link2') + self.assertEqual(dependents['oms'], []) + self.assertEqual(dependents['paths'][0]['name'], 'Optical Path1') + self.assertEqual(dependents['services'][0]['name'], 'Service2') + + def test_get_connections_equipment_model(self): + odf2 = core.get_node_model(self.neo4jdb, handle_id='25') + connections = odf2.get_connections() + self.assertEqual(len(connections), 4) + for connection in connections: + self.assertIsNotNone(connection['porta']) + self.assertIsNotNone(connection['cable']) + + def test_get_connections_subequipment_model(self): + port4 = core.get_node_model(self.neo4jdb, handle_id='24') + connections = port4.get_connections() + self.assertEqual(len(connections), 2) + for connection in connections: + self.assertIsNotNone(connection['porta']) + self.assertIsNotNone(connection['cable']) + + def test_get_dependent_as_types_host_model(self): + host1 = core.get_node_model(self.neo4jdb, handle_id='32') + dependents = host1.get_dependent_as_types() + self.assertEqual(dependents['direct'][0]['name'], 'Host2') + self.assertEqual(dependents['links'], []) + self.assertEqual(dependents['oms'], []) + self.assertEqual(dependents['paths'], []) + self.assertEqual(dependents['services'], []) + + # TODO: Fix duplicates + def test_get_units_port_model(self): + port1 = core.get_node_model(self.neo4jdb, handle_id='2') + units = port1.get_units() + self.assertEqual(units['Part_of'][0]['node'].handle_id, '3') + + def test_get_unit_port_model(self): + port1 = core.get_node_model(self.neo4jdb, handle_id='2') + units = port1.get_unit('Unit1') + self.assertEqual(units['Part_of'][0]['node'].handle_id, '3') + + def test_get_connected_to_port_model(self): + port4 = core.get_node_model(self.neo4jdb, handle_id='24') + connected_to = port4.get_connected_to() + self.assertIn(connected_to['Connected_to'][0]['node'].handle_id, ['28', '30']) + + def test_get_connection_path_port_model(self): + port4 = core.get_node_model(self.neo4jdb, handle_id='24') + connection_path = port4.get_connection_path() + 
self.assertEqual(len(connection_path), 7) + + def test_get_child_form_data_router_model(self): + physical1 = core.get_node_model(self.neo4jdb, handle_id='1') + child_form_data = physical1.get_child_form_data(node_type='Port') + self.assertEqual(child_form_data[0]['handle_id'], '2') + self.assertEqual(child_form_data[0]['name'], 'Port1') + self.assertEqual(child_form_data[0]['description'], None) + self.assertEqual(child_form_data[0]['labels'], [u'Node', u'Physical', u'Port']) + + def test_get_peering_groups_peering_partner_model(self): + peering_partner1 = core.get_node_model(self.neo4jdb, handle_id='8') + host_services = peering_partner1.get_peering_groups() + self.assertEqual(len(host_services['Uses']), 1) + self.assertIsInstance(host_services['Uses'][0]['node'], models.PeeringGroupModel) + self.assertEqual(host_services['Uses'][0]['node'].data['name'], 'Peering Group1') + self.assertEqual(host_services['Uses'][0]['relationship']['ip_address'], '127.0.0.1') + + def test_set_and_get_peering_group_peering_partner_model(self): + peering_partner1 = core.get_node_model(self.neo4jdb, handle_id='8') + peering_group2 = core.get_node_model(self.neo4jdb, handle_id='44') + + peering_partner1.set_peering_group(peering_group2.handle_id, ip_address='127.0.0.2') + peering_groups = peering_partner1.get_peering_group(peering_group2.handle_id, ip_address='127.0.0.2') + self.assertEqual(len(peering_groups['Uses']), 1) + + # TODO: Fix duplicates + + def test_set_and_get_group_dependency_peering_group_model(self): + peering_group2 = core.get_node_model(self.neo4jdb, handle_id='44') + unit2 = core.get_node_model(self.neo4jdb, handle_id='5') + + peering_group2.set_group_dependency(unit2.handle_id, ip_address='127.0.0.3') + dependencies = peering_group2.get_group_dependency(unit2.handle_id, ip_address='127.0.0.3') + self.assertEqual(len(dependencies['Depends_on']), 1) + + # TODO: Fix duplicates + + def test_get_connected_equipment_cable_model(self): + cable1 = core.get_node_model(self.neo4jdb, handle_id='28') + connections = cable1.get_connected_equipment() + self.assertEqual(len(connections), 2) + for connection in connections: + self.assertIsNotNone(connection['port']) + self.assertIsNotNone(connection['end']) + self.assertIsNotNone(connection['location']) + self.assertIsNotNone(connection['site']) + + def test_get_dependent_as_types_cable_model(self): + cable1 = core.get_node_model(self.neo4jdb, handle_id='28') + dependents = cable1.get_dependent_as_types() + + for optical_link in dependents['links']: + self.assertTrue(optical_link['name'] in ['Optical Link1', 'Optical Link2']) + self.assertEqual(dependents['oms'], []) + self.assertEqual(dependents['paths'][0]['name'], 'Optical Path1') + self.assertEqual(dependents['services'][0]['name'], 'Service2') + + def test_get_services_cable_model(self): + cable1 = core.get_node_model(self.neo4jdb, handle_id='28') + services = cable1.get_services() + self.assertEqual(len(services), 1) + self.assertEqual(services[0]['service']['name'], 'Service2') + self.assertIsInstance(services[0]['users'], list) + self.assertEqual(services[0]['users'][0]['name'], 'Customer2') + + def test_get_connection_path_cable_model(self): + cable1 = core.get_node_model(self.neo4jdb, handle_id='28') + connection_path = cable1.get_connection_path() + self.assertEqual(len(connection_path), 7) + + def test_set_connected_to_cable_model(self): + cable6 = core.get_node_model(self.neo4jdb, handle_id='45') + port7 = core.get_node_model(self.neo4jdb, handle_id='27') + + result = 
cable6.set_connected_to(port7.handle_id) + self.assertEqual(result['Connected_to'][0]['created'], True) + relationships = cable6.relationships + self.assertEqual(len(relationships['Connected_to']), 1) + + # Do not accept duplicates + result = cable6.set_connected_to(port7.handle_id) + self.assertEqual(result['Connected_to'][0]['created'], False) + relationships = cable6.relationships + self.assertEqual(len(relationships['Connected_to']), 1) + + def test_get_placement_path_unit_model(self): + unit1 = core.get_node_model(self.neo4jdb, handle_id='3') + placement_path = unit1.get_placement_path() + self.assertEqual(placement_path['placement_path'][0]['name'], 'Router1') + self.assertEqual(placement_path['placement_path'][1]['name'], 'Port1') + + def test_get_location_path_unit_model(self): + unit1 = core.get_node_model(self.neo4jdb, handle_id='3') + location_path = unit1.get_location_path() + self.assertEqual(location_path['location_path'][0]['name'], 'Site1') + self.assertEqual(location_path['location_path'][1]['name'], 'Rack1') + self.assertEqual(location_path['location_path'][2]['name'], 'Router1') + self.assertEqual(location_path['location_path'][3]['name'], 'Port1') + + def test_get_customers_service_model(self): + service2 = core.get_node_model(self.neo4jdb, handle_id='9') + customers = service2.get_customers() + self.assertEqual(len(customers['customers']), 1) + self.assertIsInstance(customers['customers'][0]['node'], models.CustomerModel)