Implement prototype for torch-based fermionic library.
hzhangxyz committed Nov 7, 2023
Commit 6a6fd7c (initial commit, 0 parents)

Showing 13 changed files with 3,604 additions and 0 deletions.
48 changes: 48 additions & 0 deletions .github/workflows/CI.yml
@@ -0,0 +1,48 @@
name: CI

on: [push, pull_request]

jobs:
  CI:

    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      matrix:
        include:
          - python-version: "3.10"
            pytorch-version: "1.12"
          - python-version: "3.10"
            pytorch-version: "1.13"
          - python-version: "3.10"
            pytorch-version: "2.0"
          - python-version: "3.10"
            pytorch-version: "2.1"

          - python-version: "3.11"
            pytorch-version: "1.13"
          - python-version: "3.11"
            pytorch-version: "2.0"
          - python-version: "3.11"
            pytorch-version: "2.1"

    steps:
      - uses: actions/checkout@v4
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v4
        with:
          python-version: ${{ matrix.python-version }}
      - name: Install requirements
        run: |
          pip install pylint==2.17 mypy==1.6 pytest==7.4 pytest-cov==4.1
          pip install torch==${{ matrix.pytorch-version }}+cpu --index-url https://download.pytorch.org/whl/cpu
          pip install multimethod
      - name: Run pylint
        run: pylint tat tests
        working-directory: ${{ github.workspace }}
      - name: Run mypy
        run: mypy tat tests
        working-directory: ${{ github.workspace }}
      - name: Run pytest
        run: pytest
        working-directory: ${{ github.workspace }}
4 changes: 4 additions & 0 deletions .gitignore
@@ -0,0 +1,4 @@
.coverage
.mypy_cache
__pycache__
env
675 changes: 675 additions & 0 deletions LICENSE.md

Large diffs are not rendered by default.

3 changes: 3 additions & 0 deletions README.md
@@ -0,0 +1,3 @@
# TAT

A Fermionic tensor library based on pytorch.
32 changes: 32 additions & 0 deletions pyproject.toml
@@ -0,0 +1,32 @@
[project]
name = "tat"
version = "0.4.0"
authors = [
{email = "[email protected]", name = "Hao Zhang"}
]
description = "A Fermionic tensor library based on pytorch."
readme = "README.md"
requires-python = ">=3.10"
license = {text = "GPL-3.0-or-later"}
dependencies = [
"multimethod>=1.9",
"torch>=1.12",
]

[tool.pylint]
max-line-length = 120
generated-members = "torch.*"
init-hook="import sys; sys.path.append(\".\")"

[tool.yapf]
based_on_style = "google"
column_limit = 120

[tool.mypy]
check_untyped_defs = true
disallow_untyped_defs = true

[tool.pytest.ini_options]
pythonpath = "."
testpaths = ["tests",]
addopts = "--cov=tat"
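
The [tool.pytest.ini_options] block above ties the test setup together: pythonpath = "." makes the in-repo tat package importable without installing it, testpaths limits collection to tests/, and --cov=tat adds coverage reporting to every run. Below is a hypothetical test file sketching that layout; the file name, test name, and values are illustrative assumptions, since the commit's actual test files are not rendered on this page.

# tests/test_utility.py (hypothetical sketch; the commit's real tests are not shown above)
import torch

from tat import _utility


def test_parity_of_integer_labels() -> None:
    # Odd integer symmetry labels carry fermionic parity, even ones do not.
    labels = torch.tensor([0, 1, 2, 3])
    assert _utility.parity(labels).tolist() == [False, True, False, True]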
6 changes: 6 additions & 0 deletions tat/__init__.py
@@ -0,0 +1,6 @@
"""
The tat is a Fermionic tensor library based on pytorch.
"""

from .edge import Edge
from .tensor import Tensor
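
Since tat/__init__.py re-exports the two public classes, downstream code can import them straight from the package. A minimal sketch (import only; the Edge and Tensor constructors are defined in tat/edge.py and tat/tensor.py, which this page does not render):

# Public API re-exported at package level; construction details live in the
# edge and tensor modules, which are among the files not shown in this view.
from tat import Edge, Tensor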
41 changes: 41 additions & 0 deletions tat/_utility.py
@@ -0,0 +1,41 @@
"""
Some internal utilities used by tat.
"""

import torch

# pylint: disable=missing-function-docstring
# pylint: disable=no-else-return


def unsqueeze(tensor: torch.Tensor, index: int, rank: int) -> torch.Tensor:
    return tensor.reshape([-1 if i == index else 1 for i in range(rank)])


def neg_symmetry(tensor: torch.Tensor) -> torch.Tensor:
    if tensor.dtype is torch.bool:
        return tensor
    else:
        return -tensor


def add_symmetry(tensor_1: torch.Tensor, tensor_2: torch.Tensor) -> torch.Tensor:
    if tensor_1.dtype is torch.bool:
        return torch.logical_xor(tensor_1, tensor_2)
    else:
        return torch.add(tensor_1, tensor_2)


def zero_symmetry(tensor: torch.Tensor) -> torch.Tensor:
    # pylint: disable=singleton-comparison
    if tensor.dtype is torch.bool:
        return tensor == False
    else:
        return tensor == 0


def parity(tensor: torch.Tensor) -> torch.Tensor:
    if tensor.dtype is torch.bool:
        return tensor
    else:
        return tensor % 2 != 0
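
The symmetry helpers above dispatch on dtype: boolean tensors behave as Z2 (fermion-parity) labels, where addition is XOR and negation is the identity, while integer tensors behave as ordinary additive symmetry labels. A minimal sketch of that behaviour, with values chosen purely for illustration:

import torch

from tat import _utility

# Integer symmetry labels: ordinary arithmetic, parity picks out the odd labels.
u1 = torch.tensor([0, 1, -2])
print(_utility.add_symmetry(u1, u1).tolist())   # [0, 2, -4]
print(_utility.neg_symmetry(u1).tolist())       # [0, -1, 2]
print(_utility.parity(u1).tolist())             # [False, True, False]

# Boolean (fermion-parity) labels: addition becomes XOR, negation is a no-op.
z2 = torch.tensor([False, True])
print(_utility.add_symmetry(z2, z2).tolist())   # [False, False]
print(_utility.neg_symmetry(z2).tolist())       # [False, True]
print(_utility.zero_symmetry(z2).tolist())      # [True, False]

# unsqueeze reshapes a 1-D tensor so it broadcasts along one chosen axis of a rank-3 tensor.
print(_utility.unsqueeze(torch.arange(3), 1, 3).shape)   # torch.Size([1, 3, 1])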
(Diffs for the remaining changed files are not shown here.)
